/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "GrBufferAllocPool.h"
#include "GrBuffer.h"
#include "GrCaps.h"
#include "GrContext.h"
#include "GrGpu.h"
#include "GrResourceProvider.h"
#include "GrTypes.h"

#include "SkTraceEvent.h"

#ifdef SK_DEBUG
    #define VALIDATE validate
#else
    static void VALIDATE(bool = false) {}
#endif

static const size_t MIN_VERTEX_BUFFER_SIZE = 1 << 15;
static const size_t MIN_INDEX_BUFFER_SIZE = 1 << 12;

// page size
#define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 15)

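// Unmaps a block's buffer, first emitting a trace event that records the
// fraction of the buffer that was never written.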
#define UNMAP_BUFFER(block)                                                               \
do {                                                                                      \
    TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("skia.gpu"),                           \
                         "GrBufferAllocPool Unmapping Buffer",                            \
                         TRACE_EVENT_SCOPE_THREAD,                                        \
                         "percent_unwritten",                                             \
                         (float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \
    (block).fBuffer->unmap();                                                             \
} while (false)

GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
                                     GrBufferType bufferType,
                                     size_t blockSize)
    : fBlocks(8) {

    fGpu = SkRef(gpu);
    fCpuData = nullptr;
    fBufferType = bufferType;
    fBufferPtr = nullptr;
    fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);

    fBytesInUse = 0;

    fBufferMapThreshold = gpu->caps()->bufferMapThreshold();
}

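// Unmaps the most recently created block if it is still mapped, then destroys
// every block in the pool.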
void GrBufferAllocPool::deleteBlocks() {
    if (fBlocks.count()) {
        GrBuffer* buffer = fBlocks.back().fBuffer;
        if (buffer->isMapped()) {
            UNMAP_BUFFER(fBlocks.back());
        }
    }
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
    SkASSERT(!fBufferPtr);
}

GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    this->deleteBlocks();
    sk_free(fCpuData);
    fGpu->unref();
}

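// Returns the pool to its initial state: all GPU buffers are released and the
// CPU-side staging allocation is freed.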
void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
    this->deleteBlocks();
    this->resetCpuData(0);      // delete all the cpu-side memory
    VALIDATE();
}

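// Ensures any data written into the active block reaches its GPU buffer:
// unmaps the buffer if it was mapped, otherwise flushes the CPU-side staging
// copy with flushCpuData().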
void GrBufferAllocPool::unmap() {
    VALIDATE();

    if (fBufferPtr) {
        BufferBlock& block = fBlocks.back();
        if (block.fBuffer->isMapped()) {
            UNMAP_BUFFER(block);
        } else {
            size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree;
            this->flushCpuData(fBlocks.back(), flushSize);
        }
        fBufferPtr = nullptr;
    }
    VALIDATE();
}

#ifdef SK_DEBUG
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
    bool wasDestroyed = false;
    if (fBufferPtr) {
        SkASSERT(!fBlocks.empty());
        if (fBlocks.back().fBuffer->isMapped()) {
            GrBuffer* buf = fBlocks.back().fBuffer;
            SkASSERT(buf->mapPtr() == fBufferPtr);
        } else {
            SkASSERT(fCpuData == fBufferPtr);
        }
    } else {
        SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isMapped());
    }
    size_t bytesInUse = 0;
    for (int i = 0; i < fBlocks.count() - 1; ++i) {
        SkASSERT(!fBlocks[i].fBuffer->isMapped());
    }
    for (int i = 0; !wasDestroyed && i < fBlocks.count(); ++i) {
        if (fBlocks[i].fBuffer->wasDestroyed()) {
            wasDestroyed = true;
        } else {
            size_t bytes = fBlocks[i].fBuffer->gpuMemorySize() - fBlocks[i].fBytesFree;
            bytesInUse += bytes;
            SkASSERT(bytes || unusedBlockAllowed);
        }
    }

    if (!wasDestroyed) {
        SkASSERT(bytesInUse == fBytesInUse);
        if (unusedBlockAllowed) {
            SkASSERT((fBytesInUse && !fBlocks.empty()) ||
                     (!fBytesInUse && (fBlocks.count() < 2)));
        } else {
            SkASSERT((0 == fBytesInUse) == fBlocks.empty());
        }
    }
}
#endif

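// Suballocates 'size' bytes at the requested alignment, carving them out of
// the active block when it has room and creating a new block otherwise. On
// success the returned pointer is writable and *buffer/*offset identify where
// the data will land on the GPU. A hypothetical caller might look like:
//
//     const GrBuffer* buffer;
//     size_t offset;
//     void* ptr = pool.makeSpace(n * stride, stride, &buffer, &offset);
//     // write n * stride bytes to ptr, then draw using buffer at offset.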
void* GrBufferAllocPool::makeSpace(size_t size,
                                   size_t alignment,
                                   const GrBuffer** buffer,
                                   size_t* offset) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);

    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
        if ((size + pad) <= back.fBytesFree) {
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;
            *offset = usedBytes;
            *buffer = back.fBuffer;
            back.fBytesFree -= size + pad;
            fBytesInUse += size + pad;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request by doing a partial update of the
    // current VB (if there is room). But we don't currently use draw calls to
    // GL that allow the driver to know that previously issued draws won't read
    // from the part of the buffer we update. Also, the GL buffer
    // implementation may be cheating on the actual buffer size by shrinking
    // the buffer on updateData() if the amount of data passed is less than the
    // full buffer size.

    if (!this->createBlock(size)) {
        return nullptr;
    }
    SkASSERT(fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    back.fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return fBufferPtr;
}

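// Returns the most recently allocated 'bytes' to the pool, destroying any
// blocks that become entirely unused along the way.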
void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();

    while (bytes) {
        // caller shouldn't try to put back more than they've taken
        SkASSERT(!fBlocks.empty());
        BufferBlock& block = fBlocks.back();
        size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree;
        if (bytes >= bytesUsed) {
            bytes -= bytesUsed;
            fBytesInUse -= bytesUsed;
            // If we mapped a buffer to satisfy an earlier makeSpace and we're
            // releasing beyond it, then unmap it.
            if (block.fBuffer->isMapped()) {
                UNMAP_BUFFER(block);
            }
            this->destroyBlock();
        } else {
            block.fBytesFree += bytes;
            fBytesInUse -= bytes;
            bytes = 0;
            break;
        }
    }

    VALIDATE();
}

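// Appends a new block sized for 'requestSize' (but no smaller than
// fMinBlockSize), flushing the previous block's pending data first. The new
// buffer is mapped when it is CPU-backed or large enough to clear the map
// threshold; otherwise writes are staged in CPU memory and flushed later.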
bool GrBufferAllocPool::createBlock(size_t requestSize) {

    size_t size = SkTMax(requestSize, fMinBlockSize);
    SkASSERT(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);

    VALIDATE();

    BufferBlock& block = fBlocks.push_back();

    block.fBuffer = this->getBuffer(size);
    if (!block.fBuffer) {
        fBlocks.pop_back();
        return false;
    }

    block.fBytesFree = block.fBuffer->gpuMemorySize();
    if (fBufferPtr) {
        SkASSERT(fBlocks.count() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
        if (prev.fBuffer->isMapped()) {
            UNMAP_BUFFER(prev);
        } else {
            this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree);
        }
        fBufferPtr = nullptr;
    }

    SkASSERT(!fBufferPtr);

    // If the buffer is CPU-backed we map it because it is free to do so and saves a copy.
    // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
    // threshold.
    bool attemptMap = block.fBuffer->isCPUBacked();
    if (!attemptMap && GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
        attemptMap = size > fBufferMapThreshold;
    }

    if (attemptMap) {
        fBufferPtr = block.fBuffer->map();
    }

    if (!fBufferPtr) {
        fBufferPtr = this->resetCpuData(block.fBytesFree);
    }

    VALIDATE(true);

    return true;
}

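// Unrefs the most recent block's buffer and removes the block from the pool.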
void GrBufferAllocPool::destroyBlock() {
    SkASSERT(!fBlocks.empty());

    BufferBlock& block = fBlocks.back();

    SkASSERT(!block.fBuffer->isMapped());
    block.fBuffer->unref();
    fBlocks.pop_back();
    fBufferPtr = nullptr;
}

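// Replaces the CPU-side staging allocation with one of 'newSize' bytes
// (zero-initialized if the backend requires cleared upload data), or frees it
// entirely when 'newSize' is 0.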
void* GrBufferAllocPool::resetCpuData(size_t newSize) {
    sk_free(fCpuData);
    if (newSize) {
        if (fGpu->caps()->mustClearUploadedBufferData()) {
            fCpuData = sk_calloc_throw(newSize);
        } else {
            fCpuData = sk_malloc_throw(newSize);
        }
    } else {
        fCpuData = nullptr;
    }
    return fCpuData;
}


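// Copies 'flushSize' bytes of staged CPU data into the block's GPU buffer,
// using map/memcpy/unmap when mapping is supported and worthwhile, and
// falling back to updateData() otherwise.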
void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
    GrBuffer* buffer = block.fBuffer;
    SkASSERT(buffer);
    SkASSERT(!buffer->isMapped());
    SkASSERT(fCpuData == fBufferPtr);
    SkASSERT(flushSize <= buffer->gpuMemorySize());
    VALIDATE(true);

    if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
        flushSize > fBufferMapThreshold) {
        void* data = buffer->map();
        if (data) {
            memcpy(data, fBufferPtr, flushSize);
            UNMAP_BUFFER(block);
            return;
        }
    }
    buffer->updateData(fBufferPtr, flushSize);
    VALIDATE(true);
}

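// Creates a dynamic buffer of the pool's type through the resource provider.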
GrBuffer* GrBufferAllocPool::getBuffer(size_t size) {

    GrResourceProvider* rp = fGpu->getContext()->resourceProvider();

    // Shouldn't have to use this flag (https://bug.skia.org/4156)
    static const uint32_t kFlags = GrResourceProvider::kNoPendingIO_Flag;
    return rp->createBuffer(size, fBufferType, kDynamic_GrAccessPattern, kFlags);
}

////////////////////////////////////////////////////////////////////////////////

GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu)
    : GrBufferAllocPool(gpu, kVertex_GrBufferType, MIN_VERTEX_BUFFER_SIZE) {
}

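// Allocates room for 'vertexCount' vertices of 'vertexSize' bytes each and
// converts the byte offset into a starting vertex index.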
void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                         int vertexCount,
                                         const GrBuffer** buffer,
                                         int* startVertex) {

    SkASSERT(vertexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startVertex);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
                                     vertexSize,
                                     buffer,
                                     &offset);

    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);
    return ptr;
}

////////////////////////////////////////////////////////////////////////////////

GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
    : GrBufferAllocPool(gpu, kIndex_GrBufferType, MIN_INDEX_BUFFER_SIZE) {
}

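// Allocates room for 'indexCount' 16-bit indices and converts the byte offset
// into a starting index.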
void* GrIndexBufferAllocPool::makeSpace(int indexCount,
                                        const GrBuffer** buffer,
                                        int* startIndex) {

    SkASSERT(indexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startIndex);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
                                     sizeof(uint16_t),
                                     buffer,
                                     &offset);

    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));
    return ptr;
}