/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkBuffer.h"
#include "GrVkGpu.h"
#include "GrVkMemory.h"
#include "GrVkUtil.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)

#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
#define VALIDATE() do {} while(false)
#endif

Create(const GrVkGpu * gpu,const Desc & desc)21 const GrVkBuffer::Resource* GrVkBuffer::Create(const GrVkGpu* gpu, const Desc& desc) {
22 VkBuffer buffer;
23 GrVkAlloc alloc;
24
25 // create the buffer object
26 VkBufferCreateInfo bufInfo;
27 memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
28 bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
29 bufInfo.flags = 0;
30 bufInfo.size = desc.fSizeInBytes;
31 switch (desc.fType) {
32 case kVertex_Type:
33 bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
34 break;
35 case kIndex_Type:
36 bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
37 break;
38 case kUniform_Type:
39 bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
40 break;
41 case kCopyRead_Type:
42 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
43 break;
44 case kCopyWrite_Type:
45 bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
46 break;
47 }
48 if (!desc.fDynamic) {
49 bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
50 }
51
52 bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
53 bufInfo.queueFamilyIndexCount = 0;
54 bufInfo.pQueueFamilyIndices = nullptr;
55
56 VkResult err;
57 err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
58 if (err) {
59 return nullptr;
60 }
61
62 if (!GrVkMemory::AllocAndBindBufferMemory(gpu,
63 buffer,
64 desc.fType,
65 desc.fDynamic,
66 &alloc)) {
67 return nullptr;
68 }
69
70 const GrVkBuffer::Resource* resource = new GrVkBuffer::Resource(buffer, alloc, desc.fType);
71 if (!resource) {
72 VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
73 GrVkMemory::FreeBufferMemory(gpu, desc.fType, alloc);
74 return nullptr;
75 }
76
77 return resource;
78 }
79
addMemoryBarrier(const GrVkGpu * gpu,VkAccessFlags srcAccessMask,VkAccessFlags dstAccesMask,VkPipelineStageFlags srcStageMask,VkPipelineStageFlags dstStageMask,bool byRegion) const80 void GrVkBuffer::addMemoryBarrier(const GrVkGpu* gpu,
81 VkAccessFlags srcAccessMask,
82 VkAccessFlags dstAccesMask,
83 VkPipelineStageFlags srcStageMask,
84 VkPipelineStageFlags dstStageMask,
85 bool byRegion) const {
86 VkBufferMemoryBarrier bufferMemoryBarrier = {
87 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType
88 NULL, // pNext
89 srcAccessMask, // srcAccessMask
90 dstAccesMask, // dstAccessMask
91 VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
92 VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
93 this->buffer(), // buffer
94 0, // offset
95 fDesc.fSizeInBytes, // size
96 };
97
98 // TODO: restrict to area of buffer we're interested in
99 gpu->addBufferMemoryBarrier(srcStageMask, dstStageMask, byRegion, &bufferMemoryBarrier);
100 }
101
// Destroys the VkBuffer and releases its backing memory. The buffer must be
// destroyed before its memory is freed.
void GrVkBuffer::Resource::freeGPUData(const GrVkGpu* gpu) const {
    SkASSERT(fBuffer);
    SkASSERT(fAlloc.fMemory);
    VK_CALL(gpu, DestroyBuffer(gpu->device(), fBuffer, nullptr));
    GrVkMemory::FreeBufferMemory(gpu, fType, fAlloc);
}

vkRelease(const GrVkGpu * gpu)109 void GrVkBuffer::vkRelease(const GrVkGpu* gpu) {
110 VALIDATE();
111 fResource->recycle(const_cast<GrVkGpu*>(gpu));
112 fResource = nullptr;
113 if (!fDesc.fDynamic) {
114 delete[] (unsigned char*)fMapPtr;
115 }
116 fMapPtr = nullptr;
117 VALIDATE();
118 }
119
vkAbandon()120 void GrVkBuffer::vkAbandon() {
121 fResource->unrefAndAbandon();
122 fResource = nullptr;
123 if (!fDesc.fDynamic) {
124 delete[] (unsigned char*)fMapPtr;
125 }
126 fMapPtr = nullptr;
127 VALIDATE();
128 }
129
buffer_type_to_access_flags(GrVkBuffer::Type type)130 VkAccessFlags buffer_type_to_access_flags(GrVkBuffer::Type type) {
131 switch (type) {
132 case GrVkBuffer::kIndex_Type:
133 return VK_ACCESS_INDEX_READ_BIT;
134 case GrVkBuffer::kVertex_Type:
135 return VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
136 default:
137 // This helper is only called for static buffers so we should only ever see index or
138 // vertex buffers types
139 SkASSERT(false);
140 return 0;
141 }
142 }
143
// Maps the buffer for CPU writes of up to 'size' bytes. Dynamic buffers are
// mapped directly with vkMapMemory; static buffers get a lazily allocated
// CPU-side staging buffer that internalUnmap() later uploads via a transfer.
// On success fMapPtr is non-null; on map failure it is left null. If the
// underlying VkBuffer had to be replaced because it was still in flight,
// *createdNewBuffer (when provided) is set to true.
void GrVkBuffer::internalMap(GrVkGpu* gpu, size_t size, bool* createdNewBuffer) {
    VALIDATE();
    SkASSERT(!this->vkIsMapped());

    // !unique() means a command buffer still holds a ref, i.e. the GPU may
    // still be using the current contents.
    if (!fResource->unique()) {
        if (fDesc.fDynamic) {
            // in use by the command buffer, so we need to create a new one
            fResource->recycle(gpu);
            fResource = this->createResource(gpu, fDesc);
            if (createdNewBuffer) {
                *createdNewBuffer = true;
            }
        } else {
            SkASSERT(fMapPtr);
            // Static buffer: keep the same VkBuffer but fence off prior
            // vertex/index reads before the upcoming transfer overwrites it.
            this->addMemoryBarrier(gpu,
                                   buffer_type_to_access_flags(fDesc.fType),
                                   VK_ACCESS_TRANSFER_WRITE_BIT,
                                   VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                                   VK_PIPELINE_STAGE_TRANSFER_BIT,
                                   false);
        }
    }

    if (fDesc.fDynamic) {
        // Map the device memory; fOffset positions this buffer within a
        // possibly shared allocation.
        const GrVkAlloc& alloc = this->alloc();
        VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc.fMemory,
                                              alloc.fOffset + fOffset,
                                              size, 0, &fMapPtr));
        if (err) {
            fMapPtr = nullptr;
        }
    } else {
        // Static buffer: reuse the staging allocation across map/unmap
        // cycles; allocate it on first use.
        if (!fMapPtr) {
            fMapPtr = new unsigned char[this->size()];
        }
    }

    VALIDATE();
}

internalUnmap(GrVkGpu * gpu,size_t size)184 void GrVkBuffer::internalUnmap(GrVkGpu* gpu, size_t size) {
185 VALIDATE();
186 SkASSERT(this->vkIsMapped());
187
188 if (fDesc.fDynamic) {
189 GrVkMemory::FlushMappedAlloc(gpu, this->alloc());
190 VK_CALL(gpu, UnmapMemory(gpu->device(), this->alloc().fMemory));
191 fMapPtr = nullptr;
192 } else {
193 gpu->updateBuffer(this, fMapPtr, this->offset(), size);
194 this->addMemoryBarrier(gpu,
195 VK_ACCESS_TRANSFER_WRITE_BIT,
196 buffer_type_to_access_flags(fDesc.fType),
197 VK_PIPELINE_STAGE_TRANSFER_BIT,
198 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
199 false);
200 }
201 }
202
vkIsMapped() const203 bool GrVkBuffer::vkIsMapped() const {
204 VALIDATE();
205 return SkToBool(fMapPtr);
206 }
207
vkUpdateData(GrVkGpu * gpu,const void * src,size_t srcSizeInBytes,bool * createdNewBuffer)208 bool GrVkBuffer::vkUpdateData(GrVkGpu* gpu, const void* src, size_t srcSizeInBytes,
209 bool* createdNewBuffer) {
210 if (srcSizeInBytes > fDesc.fSizeInBytes) {
211 return false;
212 }
213
214 this->internalMap(gpu, srcSizeInBytes, createdNewBuffer);
215 if (!fMapPtr) {
216 return false;
217 }
218
219 memcpy(fMapPtr, src, srcSizeInBytes);
220
221 this->internalUnmap(gpu, srcSizeInBytes);
222
223 return true;
224 }
225
validate() const226 void GrVkBuffer::validate() const {
227 SkASSERT(!fResource || kVertex_Type == fDesc.fType || kIndex_Type == fDesc.fType
228 || kCopyRead_Type == fDesc.fType || kCopyWrite_Type == fDesc.fType
229 || kUniform_Type == fDesc.fType);
230 }
231