/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkResourceProvider.h"

#include "GrContextPriv.h"
#include "GrSamplerState.h"
#include "GrVkCommandBuffer.h"
#include "GrVkCommandPool.h"
#include "GrVkCopyPipeline.h"
#include "GrVkGpu.h"
#include "GrVkPipeline.h"
#include "GrVkRenderTarget.h"
#include "GrVkUniformBuffer.h"
#include "GrVkUtil.h"
#include "SkTaskGroup.h"

#ifdef SK_TRACE_VK_RESOURCES
std::atomic<uint32_t> GrVkResource::fKeyCounter{0};
#endif

GrVkResourceProvider::GrVkResourceProvider(GrVkGpu* gpu)
    : fGpu(gpu)
    , fPipelineCache(VK_NULL_HANDLE) {
    fPipelineStateCache = new PipelineStateCache(gpu);
}

GrVkResourceProvider::~GrVkResourceProvider() {
    SkASSERT(0 == fRenderPassArray.count());
    SkASSERT(0 == fExternalRenderPasses.count());
    SkASSERT(VK_NULL_HANDLE == fPipelineCache);
    delete fPipelineStateCache;
}

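// Lazily creates the VkPipelineCache on first use. If the client supplied a persistent cache,
// we try to seed the new VkPipelineCache with a previously stored blob, but only after checking
// that the blob was produced by the same device and driver build (vendorID, deviceID, and
// pipelineCacheUUID must all match).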
VkPipelineCache GrVkResourceProvider::pipelineCache() {
    if (fPipelineCache == VK_NULL_HANDLE) {
        VkPipelineCacheCreateInfo createInfo;
        memset(&createInfo, 0, sizeof(VkPipelineCacheCreateInfo));
        createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
        createInfo.pNext = nullptr;
        createInfo.flags = 0;

        auto persistentCache = fGpu->getContext()->priv().getPersistentCache();
        sk_sp<SkData> cached;
        if (persistentCache) {
            uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
            sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));
            cached = persistentCache->load(*keyData);
        }
        bool usedCached = false;
        if (cached) {
            uint32_t* cacheHeader = (uint32_t*)cached->data();
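            // Version-one header layout, per the Vulkan spec: word 0 is the header length in
            // bytes (16 + VK_UUID_SIZE), word 1 the header version, word 2 the vendorID,
            // word 3 the deviceID, followed by VK_UUID_SIZE bytes of pipelineCacheUUID
            // starting at word 4.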
            if (cacheHeader[1] == VK_PIPELINE_CACHE_HEADER_VERSION_ONE) {
                // For version one of the header, the total header size is 16 bytes plus
                // VK_UUID_SIZE bytes. See Section 9.6 (Pipeline Cache) in the Vulkan spec for
                // the breakdown of these bytes.
                SkASSERT(cacheHeader[0] == 16 + VK_UUID_SIZE);
                const VkPhysicalDeviceProperties& devProps = fGpu->physicalDeviceProperties();
                const uint8_t* supportedPipelineCacheUUID = devProps.pipelineCacheUUID;
                if (cacheHeader[2] == devProps.vendorID && cacheHeader[3] == devProps.deviceID &&
                    !memcmp(&cacheHeader[4], supportedPipelineCacheUUID, VK_UUID_SIZE)) {
                    createInfo.initialDataSize = cached->size();
                    createInfo.pInitialData = cached->data();
                    usedCached = true;
                }
            }
        }
        if (!usedCached) {
            createInfo.initialDataSize = 0;
            createInfo.pInitialData = nullptr;
        }
        VkResult result = GR_VK_CALL(fGpu->vkInterface(),
                                     CreatePipelineCache(fGpu->device(), &createInfo, nullptr,
                                                         &fPipelineCache));
        SkASSERT(VK_SUCCESS == result);
        if (VK_SUCCESS != result) {
            fPipelineCache = VK_NULL_HANDLE;
        }
    }
    return fPipelineCache;
}

void GrVkResourceProvider::init() {
    // Init uniform descriptor objects
    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateUniformManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    SkASSERT(1 == fDescriptorSetManagers.count());
    fUniformDSHandle = GrVkDescriptorSetManager::Handle(0);
}

GrVkPipeline* GrVkResourceProvider::createPipeline(int numColorSamples,
                                                   const GrPrimitiveProcessor& primProc,
                                                   const GrPipeline& pipeline,
                                                   const GrStencilSettings& stencil,
                                                   VkPipelineShaderStageCreateInfo* shaderStageInfo,
                                                   int shaderStageCount,
                                                   GrPrimitiveType primitiveType,
                                                   VkRenderPass compatibleRenderPass,
                                                   VkPipelineLayout layout) {
    return GrVkPipeline::Create(fGpu, numColorSamples, primProc, pipeline, stencil, shaderStageInfo,
                                shaderStageCount, primitiveType, compatibleRenderPass, layout,
                                this->pipelineCache());
}

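// Copy pipelines are keyed only by render pass compatibility, so a linear walk over the small
// cache suffices. The returned pipeline carries a ref that the caller is responsible for.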
GrVkCopyPipeline* GrVkResourceProvider::findOrCreateCopyPipeline(
        const GrVkRenderTarget* dst,
        VkPipelineShaderStageCreateInfo* shaderStageInfo,
        VkPipelineLayout pipelineLayout) {
    // Find or create a compatible pipeline
    GrVkCopyPipeline* pipeline = nullptr;
    for (int i = 0; i < fCopyPipelines.count() && !pipeline; ++i) {
        if (fCopyPipelines[i]->isCompatible(*dst->simpleRenderPass())) {
            pipeline = fCopyPipelines[i];
        }
    }
    if (!pipeline) {
        pipeline = GrVkCopyPipeline::Create(fGpu, shaderStageInfo,
                                            pipelineLayout,
                                            dst->numColorSamples(),
                                            *dst->simpleRenderPass(),
                                            this->pipelineCache());
        if (!pipeline) {
            return nullptr;
        }
        fCopyPipelines.push_back(pipeline);
    }
    SkASSERT(pipeline);
    pipeline->ref();
    return pipeline;
}

// To create framebuffers, we first need to create a simple RenderPass that is
// only used for framebuffer creation. When we actually render we will create
// RenderPasses as needed that are compatible with the framebuffer.
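// Every render pass returned from the find* methods below carries a ref, so callers must
// unref it when they are done with it.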
const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(const GrVkRenderTarget& target,
                                               CompatibleRPHandle* compatibleHandle) {
    for (int i = 0; i < fRenderPassArray.count(); ++i) {
        if (fRenderPassArray[i].isCompatible(target)) {
            const GrVkRenderPass* renderPass = fRenderPassArray[i].getCompatibleRenderPass();
            renderPass->ref();
            if (compatibleHandle) {
                *compatibleHandle = CompatibleRPHandle(i);
            }
            return renderPass;
        }
    }

    const GrVkRenderPass* renderPass =
            fRenderPassArray.emplace_back(fGpu, target).getCompatibleRenderPass();
    renderPass->ref();

    if (compatibleHandle) {
        *compatibleHandle = CompatibleRPHandle(fRenderPassArray.count() - 1);
    }
    return renderPass;
}

const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(const CompatibleRPHandle& compatibleHandle) {
    SkASSERT(compatibleHandle.isValid() && compatibleHandle.toIndex() < fRenderPassArray.count());
    int index = compatibleHandle.toIndex();
    const GrVkRenderPass* renderPass = fRenderPassArray[index].getCompatibleRenderPass();
    renderPass->ref();
    return renderPass;
}

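// Wraps a VkRenderPass that was created outside of Skia so it can participate in render pass
// compatibility checks. The cache holds the initial ref on each wrapper; the explicit ref()
// below belongs to the caller.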
const GrVkRenderPass* GrVkResourceProvider::findCompatibleExternalRenderPass(
        VkRenderPass renderPass, uint32_t colorAttachmentIndex) {
    for (int i = 0; i < fExternalRenderPasses.count(); ++i) {
        if (fExternalRenderPasses[i]->isCompatibleExternalRP(renderPass)) {
            fExternalRenderPasses[i]->ref();
#ifdef SK_DEBUG
            uint32_t cachedColorIndex;
            SkASSERT(fExternalRenderPasses[i]->colorAttachmentIndex(&cachedColorIndex));
            SkASSERT(cachedColorIndex == colorAttachmentIndex);
#endif
            return fExternalRenderPasses[i];
        }
    }

    const GrVkRenderPass* newRenderPass = new GrVkRenderPass(renderPass, colorAttachmentIndex);
    fExternalRenderPasses.push_back(newRenderPass);
    newRenderPass->ref();
    return newRenderPass;
}

const GrVkRenderPass* GrVkResourceProvider::findRenderPass(
        const GrVkRenderTarget& target,
        const GrVkRenderPass::LoadStoreOps& colorOps,
        const GrVkRenderPass::LoadStoreOps& stencilOps,
        CompatibleRPHandle* compatibleHandle) {
    GrVkResourceProvider::CompatibleRPHandle tempRPHandle;
    GrVkResourceProvider::CompatibleRPHandle* pRPHandle = compatibleHandle ? compatibleHandle
                                                                           : &tempRPHandle;
    *pRPHandle = target.compatibleRenderPassHandle();

    // This will get us the handle to (and possibly create) the compatible set for the specific
    // GrVkRenderPass we are looking for.
    this->findCompatibleRenderPass(target, compatibleHandle);
    return this->findRenderPass(*pRPHandle, colorOps, stencilOps);
}

const GrVkRenderPass*
GrVkResourceProvider::findRenderPass(const CompatibleRPHandle& compatibleHandle,
                                     const GrVkRenderPass::LoadStoreOps& colorOps,
                                     const GrVkRenderPass::LoadStoreOps& stencilOps) {
    SkASSERT(compatibleHandle.isValid() && compatibleHandle.toIndex() < fRenderPassArray.count());
    CompatibleRenderPassSet& compatibleSet = fRenderPassArray[compatibleHandle.toIndex()];
    const GrVkRenderPass* renderPass = compatibleSet.getRenderPass(fGpu,
                                                                   colorOps,
                                                                   stencilOps);
    renderPass->ref();
    return renderPass;
}

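// Despite the "findOrCreate" name, descriptor pools are not recycled at this level; each call
// simply allocates a fresh pool sized for the requested descriptor type and count.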
GrVkDescriptorPool* GrVkResourceProvider::findOrCreateCompatibleDescriptorPool(
        VkDescriptorType type, uint32_t count) {
    return new GrVkDescriptorPool(fGpu, type, count);
}

GrVkSampler* GrVkResourceProvider::findOrCreateCompatibleSampler(
        const GrSamplerState& params, const GrVkYcbcrConversionInfo& ycbcrInfo) {
    GrVkSampler* sampler = fSamplers.find(GrVkSampler::GenerateKey(params, ycbcrInfo));
    if (!sampler) {
        sampler = GrVkSampler::Create(fGpu, params, ycbcrInfo);
        if (!sampler) {
            return nullptr;
        }
        fSamplers.add(sampler);
    }
    SkASSERT(sampler);
    sampler->ref();
    return sampler;
}

GrVkSamplerYcbcrConversion* GrVkResourceProvider::findOrCreateCompatibleSamplerYcbcrConversion(
        const GrVkYcbcrConversionInfo& ycbcrInfo) {
    GrVkSamplerYcbcrConversion* ycbcrConversion =
            fYcbcrConversions.find(GrVkSamplerYcbcrConversion::GenerateKey(ycbcrInfo));
    if (!ycbcrConversion) {
        ycbcrConversion = GrVkSamplerYcbcrConversion::Create(fGpu, ycbcrInfo);
        if (!ycbcrConversion) {
            return nullptr;
        }
        fYcbcrConversions.add(ycbcrConversion);
    }
    SkASSERT(ycbcrConversion);
    ycbcrConversion->ref();
    return ycbcrConversion;
}

GrVkPipelineState* GrVkResourceProvider::findOrCreateCompatiblePipelineState(
        GrRenderTarget* renderTarget, GrSurfaceOrigin origin,
        const GrPipeline& pipeline, const GrPrimitiveProcessor& proc,
        const GrTextureProxy* const primProcProxies[], GrPrimitiveType primitiveType,
        VkRenderPass compatibleRenderPass) {
    return fPipelineStateCache->refPipelineState(renderTarget, origin, proc, primProcProxies,
                                                 pipeline, primitiveType, compatibleRenderPass);
}

void GrVkResourceProvider::getSamplerDescriptorSetHandle(VkDescriptorType type,
                                                         const GrVkUniformHandler& uniformHandler,
                                                         GrVkDescriptorSetManager::Handle* handle) {
    SkASSERT(handle);
    SkASSERT(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
             VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type);
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        if (fDescriptorSetManagers[i]->isCompatible(type, &uniformHandler)) {
            *handle = GrVkDescriptorSetManager::Handle(i);
            return;
        }
    }

    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateSamplerManager(fGpu, type,
                                                                                   uniformHandler);
    fDescriptorSetManagers.emplace_back(dsm);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.count() - 1);
}

void GrVkResourceProvider::getSamplerDescriptorSetHandle(VkDescriptorType type,
                                                         const SkTArray<uint32_t>& visibilities,
                                                         GrVkDescriptorSetManager::Handle* handle) {
    SkASSERT(handle);
    SkASSERT(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
             VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type);
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        if (fDescriptorSetManagers[i]->isCompatible(type, visibilities)) {
            *handle = GrVkDescriptorSetManager::Handle(i);
            return;
        }
    }

    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateSamplerManager(fGpu, type,
                                                                                   visibilities);
    fDescriptorSetManagers.emplace_back(dsm);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.count() - 1);
}

VkDescriptorSetLayout GrVkResourceProvider::getUniformDSLayout() const {
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->layout();
}

VkDescriptorSetLayout GrVkResourceProvider::getSamplerDSLayout(
        const GrVkDescriptorSetManager::Handle& handle) const {
    SkASSERT(handle.isValid());
    return fDescriptorSetManagers[handle.toIndex()]->layout();
}

const GrVkDescriptorSet* GrVkResourceProvider::getUniformDescriptorSet() {
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->getDescriptorSet(fGpu,
                                                                                fUniformDSHandle);
}

const GrVkDescriptorSet* GrVkResourceProvider::getSamplerDescriptorSet(
        const GrVkDescriptorSetManager::Handle& handle) {
    SkASSERT(handle.isValid());
    return fDescriptorSetManagers[handle.toIndex()]->getDescriptorSet(fGpu, handle);
}

void GrVkResourceProvider::recycleDescriptorSet(const GrVkDescriptorSet* descSet,
                                                const GrVkDescriptorSetManager::Handle& handle) {
    SkASSERT(descSet);
    SkASSERT(handle.isValid());
    int managerIdx = handle.toIndex();
    SkASSERT(managerIdx < fDescriptorSetManagers.count());
    fDescriptorSetManagers[managerIdx]->recycleDescriptorSet(descSet);
}

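// Hands out a command pool for recording. Pools cycle between fActiveCommandPools (in use or
// awaiting GPU completion) and fAvailableCommandPools (reset and ready for reuse); the
// fBackgroundMutex guards the available list because pools are returned to it from
// SkTaskGroup threads in reset().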
GrVkCommandPool* GrVkResourceProvider::findOrCreateCommandPool() {
    std::unique_lock<std::recursive_mutex> lock(fBackgroundMutex);
    GrVkCommandPool* result;
    if (fAvailableCommandPools.count()) {
        result = fAvailableCommandPools.back();
        fAvailableCommandPools.pop_back();
    } else {
        result = GrVkCommandPool::Create(fGpu);
    }
    SkASSERT(result->unique());
    SkDEBUGCODE(
        for (const GrVkCommandPool* pool : fActiveCommandPools) {
            SkASSERT(pool != result);
        }
        for (const GrVkCommandPool* pool : fAvailableCommandPools) {
            SkASSERT(pool != result);
        }
    )
    fActiveCommandPools.push_back(result);
    result->ref();
    return result;
}

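// Scans the active pools for ones whose primary command buffer has finished on the GPU and
// hands them to backgroundReset(). Iterating in reverse keeps removeShuffle() from disturbing
// entries we have not visited yet.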
void GrVkResourceProvider::checkCommandBuffers() {
    for (int i = fActiveCommandPools.count() - 1; i >= 0; --i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        if (!pool->isOpen()) {
            GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
            if (buffer->finished(fGpu)) {
                fActiveCommandPools.removeShuffle(i);
                this->backgroundReset(pool);
            }
        }
    }
}

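// Attaches the client's finished proc to every command buffer that has been submitted but not
// yet finished. The shared ref-counted callback fires once all of those buffers have released
// their refs, i.e. once all outstanding GPU work completes.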
void GrVkResourceProvider::addFinishedProcToActiveCommandBuffers(
        GrGpuFinishedProc finishedProc, GrGpuFinishedContext finishedContext) {
    sk_sp<GrRefCntedCallback> procRef(new GrRefCntedCallback(finishedProc, finishedContext));
    for (int i = 0; i < fActiveCommandPools.count(); ++i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        if (!pool->isOpen()) {
            GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
            buffer->addFinishedProc(procRef);
        }
    }
}

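// Pops a recycled standard-size uniform buffer resource if one is available; otherwise creates
// a new one. Resources come back to the free list via recycleStandardUniformBufferResource()
// below; removeShuffle() is fine here since the list is unordered.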
const GrVkResource* GrVkResourceProvider::findOrCreateStandardUniformBufferResource() {
    const GrVkResource* resource = nullptr;
    int count = fAvailableUniformBufferResources.count();
    if (count > 0) {
        resource = fAvailableUniformBufferResources[count - 1];
        fAvailableUniformBufferResources.removeShuffle(count - 1);
    } else {
        resource = GrVkUniformBuffer::CreateResource(fGpu, GrVkUniformBuffer::kStandardSize);
    }
    return resource;
}

void GrVkResourceProvider::recycleStandardUniformBufferResource(const GrVkResource* resource) {
    fAvailableUniformBufferResources.push_back(resource);
}

void GrVkResourceProvider::destroyResources(bool deviceLost) {
    SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
    if (taskGroup) {
        taskGroup->wait();
    }

    // Release all copy pipelines
    for (int i = 0; i < fCopyPipelines.count(); ++i) {
        fCopyPipelines[i]->unref(fGpu);
    }

    // Loop over all render pass sets to make sure we destroy all the internal VkRenderPasses.
    for (int i = 0; i < fRenderPassArray.count(); ++i) {
        fRenderPassArray[i].releaseResources(fGpu);
    }
    fRenderPassArray.reset();

    for (int i = 0; i < fExternalRenderPasses.count(); ++i) {
        fExternalRenderPasses[i]->unref(fGpu);
    }
    fExternalRenderPasses.reset();

    // Iterate through all stored GrVkSamplers and unref them before resetting the hash.
    SkTDynamicHash<GrVkSampler, GrVkSampler::Key>::Iter iter(&fSamplers);
    for (; !iter.done(); ++iter) {
        (*iter).unref(fGpu);
    }
    fSamplers.reset();

    fPipelineStateCache->release();

    GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineCache(fGpu->device(), fPipelineCache, nullptr));
    fPipelineCache = VK_NULL_HANDLE;

    for (GrVkCommandPool* pool : fActiveCommandPools) {
        SkASSERT(pool->unique());
        pool->unref(fGpu);
    }
    fActiveCommandPools.reset();

    for (GrVkCommandPool* pool : fAvailableCommandPools) {
        SkASSERT(pool->unique());
        pool->unref(fGpu);
    }
    fAvailableCommandPools.reset();

    // We must release/destroy all command buffers and pipeline states before releasing the
    // GrVkDescriptorSetManagers.
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        fDescriptorSetManagers[i]->release(fGpu);
    }
    fDescriptorSetManagers.reset();

    // Release our uniform buffers.
    for (int i = 0; i < fAvailableUniformBufferResources.count(); ++i) {
        SkASSERT(fAvailableUniformBufferResources[i]->unique());
        fAvailableUniformBufferResources[i]->unref(fGpu);
    }
    fAvailableUniformBufferResources.reset();
}

void GrVkResourceProvider::abandonResources() {
    SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
    if (taskGroup) {
        taskGroup->wait();
    }

    // Abandon all command pools
    for (int i = 0; i < fActiveCommandPools.count(); ++i) {
        SkASSERT(fActiveCommandPools[i]->unique());
        fActiveCommandPools[i]->unrefAndAbandon();
    }
    fActiveCommandPools.reset();
    for (int i = 0; i < fAvailableCommandPools.count(); ++i) {
        SkASSERT(fAvailableCommandPools[i]->unique());
        fAvailableCommandPools[i]->unrefAndAbandon();
    }
    fAvailableCommandPools.reset();

    // Abandon all copy pipelines
    for (int i = 0; i < fCopyPipelines.count(); ++i) {
        fCopyPipelines[i]->unrefAndAbandon();
    }

    // Loop over all render pass sets to make sure we destroy all the internal VkRenderPasses.
    for (int i = 0; i < fRenderPassArray.count(); ++i) {
        fRenderPassArray[i].abandonResources();
    }
    fRenderPassArray.reset();

    for (int i = 0; i < fExternalRenderPasses.count(); ++i) {
        fExternalRenderPasses[i]->unrefAndAbandon();
    }
    fExternalRenderPasses.reset();

    // Iterate through all stored GrVkSamplers and unrefAndAbandon them before resetting the hash.
    SkTDynamicHash<GrVkSampler, GrVkSampler::Key>::Iter iter(&fSamplers);
    for (; !iter.done(); ++iter) {
        (*iter).unrefAndAbandon();
    }
    fSamplers.reset();

    fPipelineStateCache->abandon();

    fPipelineCache = VK_NULL_HANDLE;

    // We must abandon all command buffers and pipeline states before abandoning the
    // GrVkDescriptorSetManagers.
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        fDescriptorSetManagers[i]->abandon();
    }
    fDescriptorSetManagers.reset();

    // Abandon our uniform buffers.
    for (int i = 0; i < fAvailableUniformBufferResources.count(); ++i) {
        SkASSERT(fAvailableUniformBufferResources[i]->unique());
        fAvailableUniformBufferResources[i]->unrefAndAbandon();
    }
    fAvailableUniformBufferResources.reset();
}

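// Called when a command pool's work has finished on the GPU. Resources are released on the
// calling thread, but the (potentially expensive) pool reset is pushed to the context's
// SkTaskGroup when one exists, falling back to a synchronous reset otherwise.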
void GrVkResourceProvider::backgroundReset(GrVkCommandPool* pool) {
    SkASSERT(pool->unique());
    pool->releaseResources(fGpu);
    SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
    if (taskGroup) {
        taskGroup->add([this, pool]() {
            this->reset(pool);
        });
    } else {
        this->reset(pool);
    }
}

void GrVkResourceProvider::reset(GrVkCommandPool* pool) {
    SkASSERT(pool->unique());
    pool->reset(fGpu);
    std::unique_lock<std::recursive_mutex> providerLock(fBackgroundMutex);
    fAvailableCommandPools.push_back(pool);
}

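// Persists the driver's pipeline cache blob through the client's persistent cache, using the
// usual Vulkan two-call pattern: query the data size with a null pointer, then fetch the data.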
void GrVkResourceProvider::storePipelineCacheData() {
    size_t dataSize = 0;
    VkResult result = GR_VK_CALL(fGpu->vkInterface(), GetPipelineCacheData(fGpu->device(),
                                                                           this->pipelineCache(),
                                                                           &dataSize, nullptr));
    SkASSERT(result == VK_SUCCESS);

    std::unique_ptr<uint8_t[]> data(new uint8_t[dataSize]);

    result = GR_VK_CALL(fGpu->vkInterface(), GetPipelineCacheData(fGpu->device(),
                                                                  this->pipelineCache(),
                                                                  &dataSize,
                                                                  (void*)data.get()));
    SkASSERT(result == VK_SUCCESS);

    uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
    sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));

    fGpu->getContext()->priv().getPersistentCache()->store(
            *keyData, *SkData::MakeWithoutCopy(data.get(), dataSize));
}

////////////////////////////////////////////////////////////////////////////////

GrVkResourceProvider::CompatibleRenderPassSet::CompatibleRenderPassSet(
        const GrVkGpu* gpu,
        const GrVkRenderTarget& target)
        : fLastReturnedIndex(0) {
    fRenderPasses.emplace_back(new GrVkRenderPass());
    fRenderPasses[0]->initSimple(gpu, target);
}

bool GrVkResourceProvider::CompatibleRenderPassSet::isCompatible(
        const GrVkRenderTarget& target) const {
    // The first GrVkRenderPass should always exist since we create the basic load/store
    // render pass on creation.
    SkASSERT(fRenderPasses[0]);
    return fRenderPasses[0]->isCompatible(target);
}

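// fLastReturnedIndex starts the search at the most recently used entry, which makes repeated
// lookups of the same load/store combination hit on the first comparison.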
GrVkRenderPass* GrVkResourceProvider::CompatibleRenderPassSet::getRenderPass(
        const GrVkGpu* gpu,
        const GrVkRenderPass::LoadStoreOps& colorOps,
        const GrVkRenderPass::LoadStoreOps& stencilOps) {
    for (int i = 0; i < fRenderPasses.count(); ++i) {
        int idx = (i + fLastReturnedIndex) % fRenderPasses.count();
        if (fRenderPasses[idx]->equalLoadStoreOps(colorOps, stencilOps)) {
            fLastReturnedIndex = idx;
            return fRenderPasses[idx];
        }
    }
    GrVkRenderPass* renderPass = fRenderPasses.emplace_back(new GrVkRenderPass());
    renderPass->init(gpu, *this->getCompatibleRenderPass(), colorOps, stencilOps);
    fLastReturnedIndex = fRenderPasses.count() - 1;
    return renderPass;
}

void GrVkResourceProvider::CompatibleRenderPassSet::releaseResources(GrVkGpu* gpu) {
    for (int i = 0; i < fRenderPasses.count(); ++i) {
        if (fRenderPasses[i]) {
            fRenderPasses[i]->unref(gpu);
            fRenderPasses[i] = nullptr;
        }
    }
}

void GrVkResourceProvider::CompatibleRenderPassSet::abandonResources() {
    for (int i = 0; i < fRenderPasses.count(); ++i) {
        if (fRenderPasses[i]) {
            fRenderPasses[i]->unrefAndAbandon();
            fRenderPasses[i] = nullptr;
        }
    }
}
