/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrBufferAllocPool.h"

#include "GrBuffer.h"
#include "GrCaps.h"
#include "GrContext.h"
#include "GrContextPriv.h"
#include "GrGpu.h"
#include "GrResourceProvider.h"
#include "GrTypes.h"
#include "SkMacros.h"
#include "SkSafeMath.h"
#include "SkTraceEvent.h"

#ifdef SK_DEBUG
    #define VALIDATE validate
#else
    static void VALIDATE(bool = false) {}
#endif

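// Unmaps a block's buffer and emits an instant trace event recording the fraction of the
// buffer that was never written ("percent_unwritten").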
#define UNMAP_BUFFER(block)                                                               \
do {                                                                                      \
    TRACE_EVENT_INSTANT1("skia.gpu",                                                      \
                         "GrBufferAllocPool Unmapping Buffer",                            \
                         TRACE_EVENT_SCOPE_THREAD,                                        \
                         "percent_unwritten",                                             \
                         (float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \
    (block).fBuffer->unmap();                                                             \
} while (false)

constexpr size_t GrBufferAllocPool::kDefaultBufferSize;

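// If a caller-owned CPU buffer is supplied, it is used as the initial staging memory.
// fCpuDataSize is initialized to kDefaultBufferSize, so the supplied buffer is expected to
// hold at least that many bytes; the destructor only frees fCpuData when it is not this
// initial buffer.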
GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, GrBufferType bufferType, void* initialBuffer)
        : fBlocks(8), fGpu(gpu), fBufferType(bufferType), fInitialCpuData(initialBuffer) {
    if (fInitialCpuData) {
        fCpuDataSize = kDefaultBufferSize;
        fCpuData = fInitialCpuData;
    }
}

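// Unmaps the most recent block if it is still mapped, then destroys every block.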
void GrBufferAllocPool::deleteBlocks() {
    if (fBlocks.count()) {
        GrBuffer* buffer = fBlocks.back().fBuffer.get();
        if (buffer->isMapped()) {
            UNMAP_BUFFER(fBlocks.back());
        }
    }
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
    SkASSERT(!fBufferPtr);
}

GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    this->deleteBlocks();
    if (fCpuData != fInitialCpuData) {
        sk_free(fCpuData);
    }
}

void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
    this->deleteBlocks();
    this->resetCpuData(0);
    VALIDATE();
}

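// Makes the data written so far visible to the GPU: if the current block is mapped it is
// unmapped, otherwise the CPU-side staging data is flushed into the GPU buffer.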
void GrBufferAllocPool::unmap() {
    VALIDATE();

    if (fBufferPtr) {
        BufferBlock& block = fBlocks.back();
        if (block.fBuffer->isMapped()) {
            UNMAP_BUFFER(block);
        } else {
            size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree;
            this->flushCpuData(block, flushSize);
        }
        fBufferPtr = nullptr;
    }
    VALIDATE();
}

#ifdef SK_DEBUG
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
    bool wasDestroyed = false;
    if (fBufferPtr) {
        SkASSERT(!fBlocks.empty());
        if (!fBlocks.back().fBuffer->isMapped()) {
            SkASSERT(fCpuData == fBufferPtr);
        }
    } else {
        SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isMapped());
    }
    size_t bytesInUse = 0;
    for (int i = 0; i < fBlocks.count() - 1; ++i) {
        SkASSERT(!fBlocks[i].fBuffer->isMapped());
    }
    for (int i = 0; !wasDestroyed && i < fBlocks.count(); ++i) {
        if (fBlocks[i].fBuffer->wasDestroyed()) {
            wasDestroyed = true;
        } else {
            size_t bytes = fBlocks[i].fBuffer->gpuMemorySize() - fBlocks[i].fBytesFree;
            bytesInUse += bytes;
            SkASSERT(bytes || unusedBlockAllowed);
        }
    }

    if (!wasDestroyed) {
        SkASSERT(bytesInUse == fBytesInUse);
        if (unusedBlockAllowed) {
            SkASSERT((fBytesInUse && !fBlocks.empty()) ||
                     (!fBytesInUse && (fBlocks.count() < 2)));
        } else {
            SkASSERT((0 == fBytesInUse) == fBlocks.empty());
        }
    }
}
#endif

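// Returns a pointer to 'size' bytes of writable space, aligned to 'alignment', within a
// pooled GPU buffer. On success *buffer and *offset identify where the bytes land in that
// buffer; alignment padding is zero-filled. Returns nullptr if the padded size overflows
// or a new block cannot be created.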
void* GrBufferAllocPool::makeSpace(size_t size,
                                   size_t alignment,
                                   sk_sp<const GrBuffer>* buffer,
                                   size_t* offset) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);

    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
        SkSafeMath safeMath;
        size_t alignedSize = safeMath.add(pad, size);
        if (!safeMath.ok()) {
            return nullptr;
        }
        if (alignedSize <= back.fBytesFree) {
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;
            *offset = usedBytes;
            *buffer = back.fBuffer;
            back.fBytesFree -= alignedSize;
            fBytesInUse += alignedSize;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request with a partial update of the current
    // buffer (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, the GL buffer implementation
    // may be cheating on the actual buffer size by shrinking the buffer on
    // updateData() if the amount of data passed is less than the full buffer
    // size.

    if (!this->createBlock(size)) {
        return nullptr;
    }
    SkASSERT(fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    back.fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return fBufferPtr;
}

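// Like makeSpace(), but with a flexible size: the caller is guaranteed at least 'minSize'
// bytes and is given up to 'fallbackSize' bytes, or whatever aligned space remains in the
// current block. *actualSize reports how many bytes were actually handed out.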
void* GrBufferAllocPool::makeSpaceAtLeast(size_t minSize,
                                          size_t fallbackSize,
                                          size_t alignment,
                                          sk_sp<const GrBuffer>* buffer,
                                          size_t* offset,
                                          size_t* actualSize) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);
    SkASSERT(actualSize);

    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
        if ((minSize + pad) <= back.fBytesFree) {
            // Consume padding first, to make subsequent alignment math easier
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;
            back.fBytesFree -= pad;
            fBytesInUse += pad;

            // Give caller all remaining space in this block up to fallbackSize (but aligned
            // correctly)
            size_t size;
            if (back.fBytesFree >= fallbackSize) {
                SkASSERT(GrSizeAlignDown(fallbackSize, alignment) == fallbackSize);
                size = fallbackSize;
            } else {
                size = GrSizeAlignDown(back.fBytesFree, alignment);
            }
            *offset = usedBytes;
            *buffer = back.fBuffer;
            *actualSize = size;
            back.fBytesFree -= size;
            fBytesInUse += size;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request with a partial update of the current
    // buffer (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, the GL buffer implementation
    // may be cheating on the actual buffer size by shrinking the buffer on
    // updateData() if the amount of data passed is less than the full buffer
    // size.

    if (!this->createBlock(fallbackSize)) {
        return nullptr;
    }
    SkASSERT(fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    *actualSize = fallbackSize;
    back.fBytesFree -= fallbackSize;
    fBytesInUse += fallbackSize;
    VALIDATE();
    return fBufferPtr;
}

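// Returns the most recently allocated 'bytes' back to the pool. Blocks that become fully
// unused are unmapped (if needed) and destroyed; a partially used block just has its
// free-byte count grown.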
void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();

    while (bytes) {
        // caller shouldn't try to put back more than they've taken
        SkASSERT(!fBlocks.empty());
        BufferBlock& block = fBlocks.back();
        size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree;
        if (bytes >= bytesUsed) {
            bytes -= bytesUsed;
            fBytesInUse -= bytesUsed;
            // If we mapped a buffer to satisfy the makeSpace() request and we're
            // releasing beyond it, then unmap it.
            if (block.fBuffer->isMapped()) {
                UNMAP_BUFFER(block);
            }
            this->destroyBlock();
        } else {
            block.fBytesFree += bytes;
            fBytesInUse -= bytes;
            bytes = 0;
            break;
        }
    }

    VALIDATE();
}

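// Allocates a new block whose GPU buffer is at least kDefaultBufferSize (or requestSize,
// if larger). The previous block, if any, is unmapped or flushed first. The new buffer is
// mapped when it is CPU-backed or larger than the caps' bufferMapThreshold; otherwise
// writes go through the CPU staging allocation from resetCpuData().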
bool GrBufferAllocPool::createBlock(size_t requestSize) {
    size_t size = SkTMax(requestSize, kDefaultBufferSize);

    VALIDATE();

    BufferBlock& block = fBlocks.push_back();

    block.fBuffer = this->getBuffer(size);
    if (!block.fBuffer) {
        fBlocks.pop_back();
        return false;
    }

    block.fBytesFree = block.fBuffer->gpuMemorySize();
    if (fBufferPtr) {
        SkASSERT(fBlocks.count() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
        if (prev.fBuffer->isMapped()) {
            UNMAP_BUFFER(prev);
        } else {
            this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree);
        }
        fBufferPtr = nullptr;
    }

    SkASSERT(!fBufferPtr);

    // If the buffer is CPU-backed we map it because it is free to do so and saves a copy.
    // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
    // threshold.
    bool attemptMap = block.fBuffer->isCPUBacked();
    if (!attemptMap && GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
        attemptMap = size > fGpu->caps()->bufferMapThreshold();
    }

    if (attemptMap) {
        fBufferPtr = block.fBuffer->map();
    }

    if (!fBufferPtr) {
        fBufferPtr = this->resetCpuData(block.fBytesFree);
    }

    VALIDATE(true);

    return true;
}

void GrBufferAllocPool::destroyBlock() {
    SkASSERT(!fBlocks.empty());
    SkASSERT(!fBlocks.back().fBuffer->isMapped());
    fBlocks.pop_back();
    fBufferPtr = nullptr;
}

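// Ensures the CPU staging allocation can hold 'newSize' bytes, reallocating only when it
// must grow. The memory is zero-initialized when the caps require cleared upload data.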
void* GrBufferAllocPool::resetCpuData(size_t newSize) {
    if (newSize <= fCpuDataSize) {
        SkASSERT(!newSize || fCpuData);
        return fCpuData;
    }
    if (fCpuData != fInitialCpuData) {
        sk_free(fCpuData);
    }
    if (fGpu->caps()->mustClearUploadedBufferData()) {
        fCpuData = sk_calloc_throw(newSize);
    } else {
        fCpuData = sk_malloc_throw(newSize);
    }
    fCpuDataSize = newSize;
    return fCpuData;
}

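// Uploads 'flushSize' bytes of staged CPU data into the block's GPU buffer, preferring a
// map/memcpy/unmap when mapping is supported and the flush is larger than the map
// threshold, and falling back to updateData() otherwise.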
void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
    GrBuffer* buffer = block.fBuffer.get();
    SkASSERT(buffer);
    SkASSERT(!buffer->isMapped());
    SkASSERT(fCpuData == fBufferPtr);
    SkASSERT(flushSize <= buffer->gpuMemorySize());
    VALIDATE(true);

    if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
        flushSize > fGpu->caps()->bufferMapThreshold()) {
        void* data = buffer->map();
        if (data) {
            memcpy(data, fBufferPtr, flushSize);
            UNMAP_BUFFER(block);
            return;
        }
    }
    buffer->updateData(fBufferPtr, flushSize);
    VALIDATE(true);
}

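// Asks the resource provider for a dynamic buffer of the pool's type (vertex or index).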
sk_sp<GrBuffer> GrBufferAllocPool::getBuffer(size_t size) {
    auto resourceProvider = fGpu->getContext()->contextPriv().resourceProvider();

    return resourceProvider->createBuffer(size, fBufferType, kDynamic_GrAccessPattern,
            GrResourceProvider::Flags::kNone);
}

////////////////////////////////////////////////////////////////////////////////

GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu, void* initialCpuBuffer)
        : GrBufferAllocPool(gpu, kVertex_GrBufferType, initialCpuBuffer) {}

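// Returns space for 'vertexCount' vertices of 'vertexSize' bytes each; *buffer and
// *startVertex identify where the vertices land. A minimal usage sketch (the pool object
// and vertex type below are hypothetical, for illustration only):
//
//   sk_sp<const GrBuffer> vertexBuffer;
//   int firstVertex = 0;
//   auto* verts = static_cast<SkPoint*>(
//           pool.makeSpace(sizeof(SkPoint), 4, &vertexBuffer, &firstVertex));
//   if (verts) {
//       verts[0] = {0, 0}; verts[1] = {1, 0}; verts[2] = {0, 1}; verts[3] = {1, 1};
//       // Record a draw that reads vertexBuffer starting at firstVertex, then call
//       // unmap() on the pool before the GPU consumes the data.
//   }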
void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                         int vertexCount,
                                         sk_sp<const GrBuffer>* buffer,
                                         int* startVertex) {
    SkASSERT(vertexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startVertex);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(vertexSize, vertexCount),
                                     vertexSize,
                                     buffer,
                                     &offset);

    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);
    return ptr;
}

void* GrVertexBufferAllocPool::makeSpaceAtLeast(size_t vertexSize, int minVertexCount,
                                                int fallbackVertexCount,
                                                sk_sp<const GrBuffer>* buffer, int* startVertex,
                                                int* actualVertexCount) {
    SkASSERT(minVertexCount >= 0);
    SkASSERT(fallbackVertexCount >= minVertexCount);
    SkASSERT(buffer);
    SkASSERT(startVertex);
    SkASSERT(actualVertexCount);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    size_t actualSize SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(vertexSize, minVertexCount),
                                            SkSafeMath::Mul(vertexSize, fallbackVertexCount),
                                            vertexSize,
                                            buffer,
                                            &offset,
                                            &actualSize);

    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);

    SkASSERT(0 == actualSize % vertexSize);
    SkASSERT(actualSize >= vertexSize * minVertexCount);
    *actualVertexCount = static_cast<int>(actualSize / vertexSize);

    return ptr;
}

////////////////////////////////////////////////////////////////////////////////

GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu, void* initialCpuBuffer)
        : GrBufferAllocPool(gpu, kIndex_GrBufferType, initialCpuBuffer) {}

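// Returns space for 'indexCount' 16-bit indices; *buffer and *startIndex identify where
// the indices land.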
void* GrIndexBufferAllocPool::makeSpace(int indexCount, sk_sp<const GrBuffer>* buffer,
                                        int* startIndex) {
    SkASSERT(indexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startIndex);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(indexCount, sizeof(uint16_t)),
                                     sizeof(uint16_t),
                                     buffer,
                                     &offset);

    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));
    return ptr;
}

449 
450 void* GrIndexBufferAllocPool::makeSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
451                                                sk_sp<const GrBuffer>* buffer, int* startIndex,
452                                                int* actualIndexCount) {
453     SkASSERT(minIndexCount >= 0);
454     SkASSERT(fallbackIndexCount >= minIndexCount);
455     SkASSERT(buffer);
456     SkASSERT(startIndex);
457     SkASSERT(actualIndexCount);
458 
459     size_t offset SK_INIT_TO_AVOID_WARNING;
460     size_t actualSize SK_INIT_TO_AVOID_WARNING;
461     void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(minIndexCount, sizeof(uint16_t)),
462                                             SkSafeMath::Mul(fallbackIndexCount, sizeof(uint16_t)),
463                                             sizeof(uint16_t),
464                                             buffer,
465                                             &offset,
466                                             &actualSize);
467 
468     SkASSERT(0 == offset % sizeof(uint16_t));
469     *startIndex = static_cast<int>(offset / sizeof(uint16_t));
470 
471     SkASSERT(0 == actualSize % sizeof(uint16_t));
472     SkASSERT(actualSize >= minIndexCount * sizeof(uint16_t));
473     *actualIndexCount = static_cast<int>(actualSize / sizeof(uint16_t));
474     return ptr;
475 }
476