1 /*
2 * Copyright 2016 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "GrVkPipelineState.h"
9 #include "GrContext.h"
10 #include "GrContextPriv.h"
11 #include "GrPipeline.h"
12 #include "GrRenderTarget.h"
13 #include "GrTexturePriv.h"
14 #include "GrVkBufferView.h"
15 #include "GrVkCommandBuffer.h"
16 #include "GrVkDescriptorPool.h"
17 #include "GrVkDescriptorSet.h"
18 #include "GrVkGpu.h"
19 #include "GrVkImageView.h"
20 #include "GrVkMemory.h"
21 #include "GrVkPipeline.h"
22 #include "GrVkPipelineLayout.h"
23 #include "GrVkSampler.h"
24 #include "GrVkTexture.h"
25 #include "GrVkUniformBuffer.h"
26 #include "SkMipMap.h"
27 #include "glsl/GrGLSLFragmentProcessor.h"
28 #include "glsl/GrGLSLGeometryProcessor.h"
29 #include "glsl/GrGLSLXferProcessor.h"
30
// Takes ownership of the pipeline, wraps the raw VkPipelineLayout in a
// ref-counted GrVkPipelineLayout, and adopts the GLSL processor objects that
// were built alongside this pipeline. Uniform buffers for the geometry and
// fragment stages are allocated up front from their respective sizes.
GrVkPipelineState::GrVkPipelineState(
        GrVkGpu* gpu,
        GrVkPipeline* pipeline,
        VkPipelineLayout layout,
        const GrVkDescriptorSetManager::Handle& samplerDSHandle,
        const GrGLSLBuiltinUniformHandles& builtinUniformHandles,
        const UniformInfoArray& uniforms,
        uint32_t geometryUniformSize,
        uint32_t fragmentUniformSize,
        const UniformInfoArray& samplers,
        std::unique_ptr<GrGLSLPrimitiveProcessor> geometryProcessor,
        std::unique_ptr<GrGLSLXferProcessor> xferProcessor,
        std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fragmentProcessors,
        int fragmentProcessorCnt)
        : fPipeline(pipeline)
        , fPipelineLayout(new GrVkPipelineLayout(layout))
        , fUniformDescriptorSet(nullptr)
        , fSamplerDescriptorSet(nullptr)
        , fSamplerDSHandle(samplerDSHandle)
        , fBuiltinUniformHandles(builtinUniformHandles)
        , fGeometryProcessor(std::move(geometryProcessor))
        , fXferProcessor(std::move(xferProcessor))
        , fFragmentProcessors(std::move(fragmentProcessors))
        , fFragmentProcessorCnt(fragmentProcessorCnt)
        , fDataManager(uniforms, geometryUniformSize, fragmentUniformSize) {
    // No descriptor sets are bound yet; they are acquired lazily in
    // setAndBindUniforms / setAndBindTextures.
    fDescriptorSets[0] = VK_NULL_HANDLE;
    fDescriptorSets[1] = VK_NULL_HANDLE;
    fDescriptorSets[2] = VK_NULL_HANDLE;

    fGeometryUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, geometryUniformSize));
    fFragmentUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, fragmentUniformSize));

    fNumSamplers = samplers.count();

    for (int i = 0; i < fNumSamplers; ++i) {
        // We store the immutable samplers here and take ownership of the ref from the
        // GrVkUnformHandler.
        fImmutableSamplers.push_back(samplers[i].fImmutableSampler);
    }
}
71
GrVkPipelineState::~GrVkPipelineState() {
    // Must have freed all GPU resources before this is destroyed. The
    // destructor has no GrVkGpu, so it cannot release Vulkan objects itself;
    // freeGPUResources() or abandonGPUResources() must have run already.
    SkASSERT(!fPipeline);
    SkASSERT(!fPipelineLayout);
}
77
freeGPUResources(GrVkGpu * gpu)78 void GrVkPipelineState::freeGPUResources(GrVkGpu* gpu) {
79 if (fPipeline) {
80 fPipeline->unref(gpu);
81 fPipeline = nullptr;
82 }
83
84 if (fPipelineLayout) {
85 fPipelineLayout->unref(gpu);
86 fPipelineLayout = nullptr;
87 }
88
89 if (fGeometryUniformBuffer) {
90 fGeometryUniformBuffer->release(gpu);
91 fGeometryUniformBuffer.reset();
92 }
93
94 if (fFragmentUniformBuffer) {
95 fFragmentUniformBuffer->release(gpu);
96 fFragmentUniformBuffer.reset();
97 }
98
99 if (fUniformDescriptorSet) {
100 fUniformDescriptorSet->recycle(const_cast<GrVkGpu*>(gpu));
101 fUniformDescriptorSet = nullptr;
102 }
103
104 if (fSamplerDescriptorSet) {
105 fSamplerDescriptorSet->recycle(const_cast<GrVkGpu*>(gpu));
106 fSamplerDescriptorSet = nullptr;
107 }
108 }
109
abandonGPUResources()110 void GrVkPipelineState::abandonGPUResources() {
111 if (fPipeline) {
112 fPipeline->unrefAndAbandon();
113 fPipeline = nullptr;
114 }
115
116 if (fPipelineLayout) {
117 fPipelineLayout->unrefAndAbandon();
118 fPipelineLayout = nullptr;
119 }
120
121 if (fGeometryUniformBuffer) {
122 fGeometryUniformBuffer->abandon();
123 fGeometryUniformBuffer.reset();
124 }
125
126 if (fFragmentUniformBuffer) {
127 fFragmentUniformBuffer->abandon();
128 fFragmentUniformBuffer.reset();
129 }
130
131 if (fUniformDescriptorSet) {
132 fUniformDescriptorSet->unrefAndAbandon();
133 fUniformDescriptorSet = nullptr;
134 }
135
136 if (fSamplerDescriptorSet) {
137 fSamplerDescriptorSet->unrefAndAbandon();
138 fSamplerDescriptorSet = nullptr;
139 }
140 }
141
// Pushes per-draw uniform data from the processors into fDataManager, uploads
// it to the Vulkan uniform buffers, and binds the uniform descriptor set on
// the command buffer. Must be called before issuing the draw that uses this
// pipeline state.
void GrVkPipelineState::setAndBindUniforms(GrVkGpu* gpu,
                                           const GrRenderTarget* renderTarget,
                                           GrSurfaceOrigin origin,
                                           const GrPrimitiveProcessor& primProc,
                                           const GrPipeline& pipeline,
                                           GrVkCommandBuffer* commandBuffer) {
    // Refresh the RT-height / RT-adjustment built-in uniforms first so they
    // are included in this upload.
    this->setRenderTargetState(renderTarget, origin);

    fGeometryProcessor->setData(fDataManager, primProc,
                                GrFragmentProcessor::CoordTransformIter(pipeline));
    // Walk the FP tree and its GLSL counterparts in lockstep, letting each
    // GLSL FP pull uniform values from its matching GrFragmentProcessor.
    GrFragmentProcessor::Iter iter(pipeline);
    GrGLSLFragmentProcessor::Iter glslIter(fFragmentProcessors.get(), fFragmentProcessorCnt);
    const GrFragmentProcessor* fp = iter.next();
    GrGLSLFragmentProcessor* glslFP = glslIter.next();
    while (fp && glslFP) {
        glslFP->setData(fDataManager, *fp);
        fp = iter.next();
        glslFP = glslIter.next();
    }
    // Both iterators must exhaust together or the two trees are out of sync.
    SkASSERT(!fp && !glslFP);

    {
        SkIPoint offset;
        GrTexture* dstTexture = pipeline.peekDstTexture(&offset);

        fXferProcessor->setData(fDataManager, pipeline.getXferProcessor(), dstTexture, offset);
    }

    // Get new descriptor set
    if (fGeometryUniformBuffer || fFragmentUniformBuffer) {
        int uniformDSIdx = GrVkUniformHandler::kUniformBufferDescSet;
        // A fresh descriptor set is needed if the upload had to switch to new
        // buffer memory, or if we have never acquired one. The old set is
        // recycled, not leaked — in-flight command buffers still hold a ref.
        if (fDataManager.uploadUniformBuffers(
                    gpu, fGeometryUniformBuffer.get(), fFragmentUniformBuffer.get()) ||
            !fUniformDescriptorSet) {
            if (fUniformDescriptorSet) {
                fUniformDescriptorSet->recycle(gpu);
            }
            fUniformDescriptorSet = gpu->resourceProvider().getUniformDescriptorSet();
            fDescriptorSets[uniformDSIdx] = fUniformDescriptorSet->descriptorSet();
            this->writeUniformBuffers(gpu);
        }
        commandBuffer->bindDescriptorSets(gpu, this, fPipelineLayout, uniformDSIdx, 1,
                                          &fDescriptorSets[uniformDSIdx], 0, nullptr);
        // Keep the set and buffers alive until the command buffer finishes.
        if (fUniformDescriptorSet) {
            commandBuffer->addRecycledResource(fUniformDescriptorSet);
        }
        if (fGeometryUniformBuffer) {
            commandBuffer->addRecycledResource(fGeometryUniformBuffer->resource());
        }
        if (fFragmentUniformBuffer) {
            commandBuffer->addRecycledResource(fFragmentUniformBuffer->resource());
        }
    }
}
196
// Gathers every texture/sampler the draw uses (primitive processor, fragment
// processors, dst-texture copy), writes them into a freshly acquired sampler
// descriptor set, and binds that set on the command buffer.
void GrVkPipelineState::setAndBindTextures(GrVkGpu* gpu,
                                           const GrPrimitiveProcessor& primProc,
                                           const GrPipeline& pipeline,
                                           const GrTextureProxy* const primProcTextures[],
                                           GrVkCommandBuffer* commandBuffer) {
    SkASSERT(primProcTextures || !primProc.numTextureSamplers());

    // Flat list of (sampler state, texture) pairs in descriptor-binding order.
    struct SamplerBindings {
        GrSamplerState fState;
        GrVkTexture* fTexture;
    };
    SkAutoSTMalloc<8, SamplerBindings> samplerBindings(fNumSamplers);
    int currTextureBinding = 0;

    // NOTE(review): setAndBindUniforms() already calls setData on the
    // geometry processor; this second call looks redundant — confirm whether
    // this revision relies on it before removing.
    fGeometryProcessor->setData(fDataManager, primProc,
                                GrFragmentProcessor::CoordTransformIter(pipeline));
    // Primitive-processor textures come first in the binding order.
    for (int i = 0; i < primProc.numTextureSamplers(); ++i) {
        const auto& sampler = primProc.textureSampler(i);
        auto texture = static_cast<GrVkTexture*>(primProcTextures[i]->peekTexture());
        samplerBindings[currTextureBinding++] = {sampler.samplerState(), texture};
    }

    // Then each fragment processor's samplers, walking the FP tree and its
    // GLSL counterparts in lockstep (same traversal as setAndBindUniforms).
    GrFragmentProcessor::Iter iter(pipeline);
    GrGLSLFragmentProcessor::Iter glslIter(fFragmentProcessors.get(), fFragmentProcessorCnt);
    const GrFragmentProcessor* fp = iter.next();
    GrGLSLFragmentProcessor* glslFP = glslIter.next();
    while (fp && glslFP) {
        for (int i = 0; i < fp->numTextureSamplers(); ++i) {
            const auto& sampler = fp->textureSampler(i);
            samplerBindings[currTextureBinding++] =
                    {sampler.samplerState(), static_cast<GrVkTexture*>(sampler.peekTexture())};
        }
        fp = iter.next();
        glslFP = glslIter.next();
    }
    SkASSERT(!fp && !glslFP);

    // The dst-texture copy (if the XP needs one) occupies the last binding.
    if (GrTextureProxy* dstTextureProxy = pipeline.dstTextureProxy()) {
        samplerBindings[currTextureBinding++] = {
                GrSamplerState::ClampNearest(),
                static_cast<GrVkTexture*>(dstTextureProxy->peekTexture())};
    }

    // Get new descriptor set
    SkASSERT(fNumSamplers == currTextureBinding);
    if (fNumSamplers) {
        // Always take a fresh set; the previous one is recycled (command
        // buffers that recorded it still hold their own refs).
        if (fSamplerDescriptorSet) {
            fSamplerDescriptorSet->recycle(gpu);
        }
        fSamplerDescriptorSet = gpu->resourceProvider().getSamplerDescriptorSet(fSamplerDSHandle);
        int samplerDSIdx = GrVkUniformHandler::kSamplerDescSet;
        fDescriptorSets[samplerDSIdx] = fSamplerDescriptorSet->descriptorSet();
        for (int i = 0; i < fNumSamplers; ++i) {
            const GrSamplerState& state = samplerBindings[i].fState;
            GrVkTexture* texture = samplerBindings[i].fTexture;

            const GrVkImageView* textureView = texture->textureView();
            const GrVkSampler* sampler = nullptr;
            // Immutable samplers (e.g. for YCbCr conversion) were baked into
            // the layout at construction; otherwise look one up per state.
            if (fImmutableSamplers[i]) {
                sampler = fImmutableSamplers[i];
            } else {
                sampler = gpu->resourceProvider().findOrCreateCompatibleSampler(
                        state, texture->ycbcrConversionInfo());
            }
            SkASSERT(sampler);

            // imageInfo only needs to live through the UpdateDescriptorSets
            // call below, which happens within this iteration.
            VkDescriptorImageInfo imageInfo;
            memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
            imageInfo.sampler = sampler->sampler();
            imageInfo.imageView = textureView->imageView();
            imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

            VkWriteDescriptorSet writeInfo;
            memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
            writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
            writeInfo.pNext = nullptr;
            writeInfo.dstSet = fDescriptorSets[GrVkUniformHandler::kSamplerDescSet];
            writeInfo.dstBinding = i;
            writeInfo.dstArrayElement = 0;
            writeInfo.descriptorCount = 1;
            writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
            writeInfo.pImageInfo = &imageInfo;
            writeInfo.pBufferInfo = nullptr;
            writeInfo.pTexelBufferView = nullptr;

            GR_VK_CALL(gpu->vkInterface(),
                       UpdateDescriptorSets(gpu->device(), 1, &writeInfo, 0, nullptr));
            // addResource refs the sampler for the command buffer's lifetime;
            // a looked-up sampler carries a ref from findOrCreate that we now
            // drop. Immutable samplers are owned by this state, so no unref.
            commandBuffer->addResource(sampler);
            if (!fImmutableSamplers[i]) {
                sampler->unref(gpu);
            }
            commandBuffer->addResource(samplerBindings[i].fTexture->textureView());
            commandBuffer->addResource(samplerBindings[i].fTexture->resource());
        }

        commandBuffer->bindDescriptorSets(gpu, this, fPipelineLayout, samplerDSIdx, 1,
                                          &fDescriptorSets[samplerDSIdx], 0, nullptr);
        commandBuffer->addRecycledResource(fSamplerDescriptorSet);
    }
}
297
set_uniform_descriptor_writes(VkWriteDescriptorSet * descriptorWrite,VkDescriptorBufferInfo * bufferInfo,const GrVkUniformBuffer * buffer,VkDescriptorSet descriptorSet,uint32_t binding)298 void set_uniform_descriptor_writes(VkWriteDescriptorSet* descriptorWrite,
299 VkDescriptorBufferInfo* bufferInfo,
300 const GrVkUniformBuffer* buffer,
301 VkDescriptorSet descriptorSet,
302 uint32_t binding) {
303
304 memset(bufferInfo, 0, sizeof(VkDescriptorBufferInfo));
305 bufferInfo->buffer = buffer->buffer();
306 bufferInfo->offset = buffer->offset();
307 bufferInfo->range = buffer->size();
308
309 memset(descriptorWrite, 0, sizeof(VkWriteDescriptorSet));
310 descriptorWrite->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
311 descriptorWrite->pNext = nullptr;
312 descriptorWrite->dstSet = descriptorSet;
313 descriptorWrite->dstBinding = binding;
314 descriptorWrite->dstArrayElement = 0;
315 descriptorWrite->descriptorCount = 1;
316 descriptorWrite->descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
317 descriptorWrite->pImageInfo = nullptr;
318 descriptorWrite->pBufferInfo = bufferInfo;
319 descriptorWrite->pTexelBufferView = nullptr;
320 }
321
writeUniformBuffers(const GrVkGpu * gpu)322 void GrVkPipelineState::writeUniformBuffers(const GrVkGpu* gpu) {
323 VkWriteDescriptorSet descriptorWrites[3];
324 VkDescriptorBufferInfo bufferInfos[3];
325
326 uint32_t writeCount = 0;
327
328 // Geometry Uniform Buffer
329 if (fGeometryUniformBuffer.get()) {
330 set_uniform_descriptor_writes(&descriptorWrites[writeCount],
331 &bufferInfos[writeCount],
332 fGeometryUniformBuffer.get(),
333 fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet],
334 GrVkUniformHandler::kGeometryBinding);
335 ++writeCount;
336 }
337
338 // Fragment Uniform Buffer
339 if (fFragmentUniformBuffer.get()) {
340 set_uniform_descriptor_writes(&descriptorWrites[writeCount],
341 &bufferInfos[writeCount],
342 fFragmentUniformBuffer.get(),
343 fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet],
344 GrVkUniformHandler::kFragBinding);
345 ++writeCount;
346 }
347
348 if (writeCount) {
349 GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(),
350 writeCount,
351 descriptorWrites,
352 0, nullptr));
353 }
354 }
355
setRenderTargetState(const GrRenderTarget * rt,GrSurfaceOrigin origin)356 void GrVkPipelineState::setRenderTargetState(const GrRenderTarget* rt, GrSurfaceOrigin origin) {
357
358 // Load the RT height uniform if it is needed to y-flip gl_FragCoord.
359 if (fBuiltinUniformHandles.fRTHeightUni.isValid() &&
360 fRenderTargetState.fRenderTargetSize.fHeight != rt->height()) {
361 fDataManager.set1f(fBuiltinUniformHandles.fRTHeightUni, SkIntToScalar(rt->height()));
362 }
363
364 // set RT adjustment
365 SkISize size;
366 size.set(rt->width(), rt->height());
367 SkASSERT(fBuiltinUniformHandles.fRTAdjustmentUni.isValid());
368 if (fRenderTargetState.fRenderTargetOrigin != origin ||
369 fRenderTargetState.fRenderTargetSize != size) {
370 fRenderTargetState.fRenderTargetSize = size;
371 fRenderTargetState.fRenderTargetOrigin = origin;
372
373 float rtAdjustmentVec[4];
374 fRenderTargetState.getRTAdjustmentVec(rtAdjustmentVec);
375 fDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, rtAdjustmentVec);
376 }
377 }
378
// Records a bind of this state's VkPipeline into the given command buffer.
void GrVkPipelineState::bindPipeline(const GrVkGpu* gpu, GrVkCommandBuffer* commandBuffer) {
    commandBuffer->bindPipeline(gpu, fPipeline);
}
382