/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrBatch.h"
#include "GrBatchTarget.h"
#include "GrResourceProvider.h"

#include "GrMemoryPool.h"
#include "SkSpinlock.h"

// TODO: I noticed a small benefit to using a larger exclusive pool for batches. It's very small,
// but seems to be mostly consistent. There is a lot in flux right now, but we should really
// revisit this once batches are used everywhere.

// We use a global pool protected by a mutex (spinlock). Chrome may use the same GrContext on
// different threads, though never concurrently; a memory barrier separates accesses of a context
// from different threads. However, there may be multiple GrContexts, and those contexts may be
// in use concurrently on different threads.
namespace {
SK_DECLARE_STATIC_SPINLOCK(gBatchSpinlock);
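// RAII accessor: acquires gBatchSpinlock on construction and releases it on
// destruction, so the pool below is only ever touched by one thread at a time.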
class MemoryPoolAccessor {
public:
    MemoryPoolAccessor() { gBatchSpinlock.acquire(); }

    ~MemoryPoolAccessor() { gBatchSpinlock.release(); }

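    // Lazily created on first use; safe without further synchronization because
    // every caller holds gBatchSpinlock through this accessor.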
    GrMemoryPool* pool() const {
        static GrMemoryPool gPool(16384, 16384);
        return &gPool;
    }
};
}

int32_t GrBatch::gCurrBatchClassID = GrBatch::kIllegalBatchClassID;

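// GrBatch overrides operator new/delete so that every batch, regardless of
// subclass, is allocated from (and returned to) the shared pool above.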
void* GrBatch::operator new(size_t size) {
    return MemoryPoolAccessor().pool()->allocate(size);
}

void GrBatch::operator delete(void* target) {
    return MemoryPoolAccessor().pool()->release(target);
}

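// Allocates vertex space for 'instancesToDraw' instances and records the
// instanced draw in fVertices. Returns a pointer the caller writes vertex data
// into, or NULL if allocation fails or no index buffer was supplied.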
void* GrBatch::InstancedHelper::init(GrBatchTarget* batchTarget, GrPrimitiveType primType,
                                     size_t vertexStride, const GrIndexBuffer* indexBuffer,
                                     int verticesPerInstance, int indicesPerInstance,
                                     int instancesToDraw) {
    SkASSERT(batchTarget);
    if (!indexBuffer) {
        return NULL;
    }
    const GrVertexBuffer* vertexBuffer;
    int firstVertex;
    int vertexCount = verticesPerInstance * instancesToDraw;
    void* vertices = batchTarget->makeVertSpace(vertexStride, vertexCount,
                                                &vertexBuffer, &firstVertex);
    if (!vertices) {
        SkDebugf("Vertices could not be allocated for instanced rendering.");
        return NULL;
    }
    SkASSERT(vertexBuffer);
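    // Each instance consumes indicesPerInstance 16-bit indices, so the size of
    // the index buffer caps how many instances a single draw can cover.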
    size_t ibSize = indexBuffer->gpuMemorySize();
    int maxInstancesPerDraw = static_cast<int>(ibSize / (sizeof(uint16_t) * indicesPerInstance));

    fVertices.initInstanced(primType, vertexBuffer, indexBuffer,
                            firstVertex, verticesPerInstance, indicesPerInstance, instancesToDraw,
                            maxInstancesPerDraw);
    return vertices;
}

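// Convenience wrapper around InstancedHelper::init for drawing quads: each quad
// uses kVerticesPerQuad (4) vertices and kIndicesPerQuad (6) indices out of the
// shared quad index buffer owned by the resource provider.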
void* GrBatch::QuadHelper::init(GrBatchTarget* batchTarget, size_t vertexStride, int quadsToDraw) {
    SkAutoTUnref<const GrIndexBuffer> quadIndexBuffer(
            batchTarget->resourceProvider()->refQuadIndexBuffer());
    if (!quadIndexBuffer) {
        SkDebugf("Could not get quad index buffer.");
        return NULL;
    }
    return this->INHERITED::init(batchTarget, kTriangles_GrPrimitiveType, vertexStride,
                                 quadIndexBuffer, kVerticesPerQuad, kIndicesPerQuad, quadsToDraw);
}
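
// Usage sketch (hypothetical subclass code, not part of this file): a batch's
// geometry generation would typically look like
//
//   QuadHelper helper;
//   void* verts = helper.init(batchTarget, vertexStride, quadCount);
//   if (!verts) {
//       return;
//   }
//   // ... write quadCount * kVerticesPerQuad vertices of vertexStride bytes ...
//
// after which the draw recorded in the helper's fVertices is issued.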