/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkGpuCommandBuffer.h"

#include "GrBackendDrawableInfo.h"
#include "GrFixedClip.h"
#include "GrMesh.h"
#include "GrOpFlushState.h"
#include "GrPipeline.h"
#include "GrRenderTargetPriv.h"
#include "GrTexturePriv.h"
#include "GrVkCommandBuffer.h"
#include "GrVkCommandPool.h"
#include "GrVkGpu.h"
#include "GrVkPipeline.h"
#include "GrVkRenderPass.h"
#include "GrVkRenderTarget.h"
#include "GrVkResourceProvider.h"
#include "GrVkSemaphore.h"
#include "GrVkTexture.h"
#include "SkDrawable.h"
#include "SkRect.h"

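// GrVkGpuTextureCommandBuffer only records work: copy() queues each request in fCopies,
// and the queued copies are executed against the GPU when submit() is called.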
void GrVkGpuTextureCommandBuffer::copy(GrSurface* src, GrSurfaceOrigin srcOrigin,
                                       const SkIRect& srcRect, const SkIPoint& dstPoint) {
    fCopies.emplace_back(src, srcOrigin, srcRect, dstPoint);
}

void GrVkGpuTextureCommandBuffer::insertEventMarker(const char* msg) {
    // TODO: does Vulkan have a correlate?
}

void GrVkGpuTextureCommandBuffer::submit() {
    for (int i = 0; i < fCopies.count(); ++i) {
        CopyInfo& copyInfo = fCopies[i];
        fGpu->copySurface(fTexture, fOrigin, copyInfo.fSrc, copyInfo.fSrcOrigin, copyInfo.fSrcRect,
                          copyInfo.fDstPoint);
    }
}

GrVkGpuTextureCommandBuffer::~GrVkGpuTextureCommandBuffer() {}

////////////////////////////////////////////////////////////////////////////////

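// Translates the API-neutral GrLoadOp/GrStoreOp pair into the equivalent
// VkAttachmentLoadOp/VkAttachmentStoreOp values used when building render passes.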
void get_vk_load_store_ops(GrLoadOp loadOpIn, GrStoreOp storeOpIn,
                           VkAttachmentLoadOp* loadOp, VkAttachmentStoreOp* storeOp) {
    switch (loadOpIn) {
        case GrLoadOp::kLoad:
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
            break;
        case GrLoadOp::kClear:
            *loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
            break;
        case GrLoadOp::kDiscard:
            *loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid LoadOp");
            *loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
    }

    switch (storeOpIn) {
        case GrStoreOp::kStore:
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
            break;
        case GrStoreOp::kDiscard:
            *storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
            break;
        default:
            SK_ABORT("Invalid StoreOp");
            *storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    }
}

GrVkGpuRTCommandBuffer::GrVkGpuRTCommandBuffer(GrVkGpu* gpu)
        : fCurrentCmdInfo(-1)
        , fGpu(gpu)
        , fLastPipelineState(nullptr) {
}

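// Sets up the first CommandBufferInfo for this render target: finds or creates a render
// pass with the requested color and stencil load/store ops, records the clear color,
// and opens a secondary command buffer to record into.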
void GrVkGpuRTCommandBuffer::init() {
    GrVkRenderPass::LoadStoreOps vkColorOps(fVkColorLoadOp, fVkColorStoreOp);
    GrVkRenderPass::LoadStoreOps vkStencilOps(fVkStencilLoadOp, fVkStencilStoreOp);

    CommandBufferInfo& cbInfo = fCommandBufferInfos.push_back();
    SkASSERT(fCommandBufferInfos.count() == 1);
    fCurrentCmdInfo = 0;

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    const GrVkResourceProvider::CompatibleRPHandle& rpHandle = vkRT->compatibleRenderPassHandle();
    if (rpHandle.isValid()) {
        cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    } else {
        cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*vkRT,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    }

    cbInfo.fColorClearValue.color.float32[0] = fClearColor[0];
    cbInfo.fColorClearValue.color.float32[1] = fClearColor[1];
    cbInfo.fColorClearValue.color.float32[2] = fClearColor[2];
    cbInfo.fColorClearValue.color.float32[3] = fClearColor[3];

    if (VK_ATTACHMENT_LOAD_OP_CLEAR == fVkColorLoadOp) {
        cbInfo.fBounds = SkRect::MakeWH(vkRT->width(), vkRT->height());
    } else {
        cbInfo.fBounds.setEmpty();
    }

    if (VK_ATTACHMENT_LOAD_OP_CLEAR == fVkColorLoadOp) {
        cbInfo.fLoadStoreState = LoadStoreState::kStartsWithClear;
    } else if (VK_ATTACHMENT_LOAD_OP_LOAD == fVkColorLoadOp &&
               VK_ATTACHMENT_STORE_OP_STORE == fVkColorStoreOp) {
        cbInfo.fLoadStoreState = LoadStoreState::kLoadAndStore;
    } else if (VK_ATTACHMENT_LOAD_OP_DONT_CARE == fVkColorLoadOp) {
        cbInfo.fLoadStoreState = LoadStoreState::kStartsWithDiscard;
    }

    cbInfo.fCommandBuffers.push_back(fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu));
    cbInfo.currentCmdBuf()->begin(fGpu, vkRT->framebuffer(), cbInfo.fRenderPass);
}

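// Variant of init() for render targets that wrap an externally provided secondary
// command buffer: the external render pass and command buffer are adopted (and ref'ed)
// instead of being created here.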
void GrVkGpuRTCommandBuffer::initWrapped() {
    CommandBufferInfo& cbInfo = fCommandBufferInfos.push_back();
    SkASSERT(fCommandBufferInfos.count() == 1);
    fCurrentCmdInfo = 0;

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    SkASSERT(vkRT->wrapsSecondaryCommandBuffer());
    cbInfo.fRenderPass = vkRT->externalRenderPass();
    cbInfo.fRenderPass->ref();

    cbInfo.fBounds.setEmpty();
    cbInfo.fCommandBuffers.push_back(vkRT->getExternalSecondaryCommandBuffer());
    cbInfo.fCommandBuffers[0]->ref();
    cbInfo.currentCmdBuf()->begin(fGpu, nullptr, cbInfo.fRenderPass);
}

GrVkGpuRTCommandBuffer::~GrVkGpuRTCommandBuffer() {
    this->reset();
}

GrGpu* GrVkGpuRTCommandBuffer::gpu() { return fGpu; }

void GrVkGpuRTCommandBuffer::end() {
    if (fCurrentCmdInfo >= 0) {
        fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf()->end(fGpu);
    }
}

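// Replays everything recorded for this render target in order: each pass's inline
// uploads and pre-draw copies run first, then the needed image layout transitions are
// issued, and finally the pass's secondary command buffers are submitted.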
void GrVkGpuRTCommandBuffer::submit() {
    if (!fRenderTarget) {
        return;
    }

    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    GrVkImage* targetImage = vkRT->msaaImage() ? vkRT->msaaImage() : vkRT;
    GrStencilAttachment* stencil = fRenderTarget->renderTargetPriv().getStencilAttachment();

    for (int i = 0; i < fCommandBufferInfos.count(); ++i) {
        CommandBufferInfo& cbInfo = fCommandBufferInfos[i];

        for (int j = 0; j < cbInfo.fPreDrawUploads.count(); ++j) {
            InlineUploadInfo& iuInfo = cbInfo.fPreDrawUploads[j];
            iuInfo.fFlushState->doUpload(iuInfo.fUpload);
        }

        for (int j = 0; j < cbInfo.fPreCopies.count(); ++j) {
            CopyInfo& copyInfo = cbInfo.fPreCopies[j];
            fGpu->copySurface(fRenderTarget, fOrigin, copyInfo.fSrc, copyInfo.fSrcOrigin,
                              copyInfo.fSrcRect, copyInfo.fDstPoint, copyInfo.fShouldDiscardDst);
        }

        // TODO: Many things create a scratch texture which adds the discard immediately, but
        // then don't draw to it right away. This causes the discard to be ignored and we get
        // yelled at for loading uninitialized data. However, once MDB lands with reordering, the
        // discard will get reordered with the rest of the draw commands and we can remove the
        // discard check.
        if (cbInfo.fIsEmpty &&
            cbInfo.fLoadStoreState != LoadStoreState::kStartsWithClear &&
            cbInfo.fLoadStoreState != LoadStoreState::kStartsWithDiscard) {
            // We have submitted no actual draw commands to the command buffer and we are not
            // using the render pass to do a clear, so there is no need to submit anything.
            continue;
        }

        // We don't want to actually submit the secondary command buffer if it is wrapped.
        if (this->wrapsSecondaryCommandBuffer()) {
            // If we have any sampled images set their layout now.
            for (int j = 0; j < cbInfo.fSampledImages.count(); ++j) {
                cbInfo.fSampledImages[j]->setImageLayout(fGpu,
                                                         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                                         VK_ACCESS_SHADER_READ_BIT,
                                                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                                         false);
            }

            // There should only be one secondary command buffer in the wrapped case, so it is
            // safe to just return here.
            SkASSERT(fCommandBufferInfos.count() == 1);
            return;
        }

        // Make sure if we only have a discard load that we execute the discard on the whole
        // image.
        // TODO: Once we improve our tracking of discards so that we never end up flushing a
        // discard call with no actual ops, remove this.
        if (cbInfo.fIsEmpty && cbInfo.fLoadStoreState == LoadStoreState::kStartsWithDiscard) {
            cbInfo.fBounds = SkRect::MakeWH(vkRT->width(), vkRT->height());
        }

        if (cbInfo.fBounds.intersect(0, 0,
                                     SkIntToScalar(fRenderTarget->width()),
                                     SkIntToScalar(fRenderTarget->height()))) {
            // Make sure we do the following layout changes after all copies, uploads, or any
            // other pre-work is done, since we may change the layouts in the pre-work. Also,
            // since the draws will be submitted in different render passes, we need to guard
            // against write-after-write hazards.

            // Change layout of our render target so it can be used as the color attachment.
            // TODO: If we know that we will never be blending or loading the attachment we could
            // drop the VK_ACCESS_COLOR_ATTACHMENT_READ_BIT.
            targetImage->setImageLayout(fGpu,
                                        VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                                        VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                                        VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                        VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                        false);

            // If we are using a stencil attachment we also need to update its layout.
            if (stencil) {
                GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
                // We need the write and read access bits since we may load and store the
                // stencil. The initial load happens in the
                // VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT so we wait there.
                vkStencil->setImageLayout(fGpu,
                                          VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                                          VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                                          VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
                                          VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                          false);
            }

            // If we have any sampled images set their layout now.
            for (int j = 0; j < cbInfo.fSampledImages.count(); ++j) {
                cbInfo.fSampledImages[j]->setImageLayout(fGpu,
                                                         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                                         VK_ACCESS_SHADER_READ_BIT,
                                                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                                         false);
            }

            SkIRect iBounds;
            cbInfo.fBounds.roundOut(&iBounds);

            fGpu->submitSecondaryCommandBuffer(cbInfo.fCommandBuffers, cbInfo.fRenderPass,
                                               &cbInfo.fColorClearValue, vkRT, fOrigin, iBounds);
        }
    }
}

void GrVkGpuRTCommandBuffer::set(GrRenderTarget* rt, GrSurfaceOrigin origin,
                                 const GrGpuRTCommandBuffer::LoadAndStoreInfo& colorInfo,
                                 const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo& stencilInfo) {
    SkASSERT(!fRenderTarget);
    SkASSERT(fCommandBufferInfos.empty());
    SkASSERT(-1 == fCurrentCmdInfo);
    SkASSERT(fGpu == rt->getContext()->contextPriv().getGpu());
    SkASSERT(!fLastPipelineState);

    this->INHERITED::set(rt, origin);

    if (this->wrapsSecondaryCommandBuffer()) {
        this->initWrapped();
        return;
    }

    fClearColor = colorInfo.fClearColor;

    get_vk_load_store_ops(colorInfo.fLoadOp, colorInfo.fStoreOp,
                          &fVkColorLoadOp, &fVkColorStoreOp);

    get_vk_load_store_ops(stencilInfo.fLoadOp, stencilInfo.fStoreOp,
                          &fVkStencilLoadOp, &fVkStencilStoreOp);

    this->init();
}

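// Unrefs every secondary command buffer and render pass we hold and clears all cached
// state. The destructor calls this, and set() asserts that it has happened before the
// object is reused.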
void GrVkGpuRTCommandBuffer::reset() {
    for (int i = 0; i < fCommandBufferInfos.count(); ++i) {
        CommandBufferInfo& cbInfo = fCommandBufferInfos[i];
        for (int j = 0; j < cbInfo.fCommandBuffers.count(); ++j) {
            cbInfo.fCommandBuffers[j]->unref(fGpu);
        }
        cbInfo.fRenderPass->unref(fGpu);
    }
    fCommandBufferInfos.reset();

    fCurrentCmdInfo = -1;

    fLastPipelineState = nullptr;
    fRenderTarget = nullptr;
}

bool GrVkGpuRTCommandBuffer::wrapsSecondaryCommandBuffer() const {
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
    return vkRT->wrapsSecondaryCommandBuffer();
}

////////////////////////////////////////////////////////////////////////////////

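// When nothing has been recorded yet, a discard is expressed by swapping the current
// render pass for a compatible one whose attachments load with
// VK_ATTACHMENT_LOAD_OP_DONT_CARE, letting the driver skip loading the old contents.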
void GrVkGpuRTCommandBuffer::discard() {
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    if (cbInfo.fIsEmpty) {
        // Change the render pass to do a don't-care load for both color & stencil.
        GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                                                VK_ATTACHMENT_STORE_OP_STORE);
        GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                                                  VK_ATTACHMENT_STORE_OP_STORE);

        const GrVkRenderPass* oldRP = cbInfo.fRenderPass;

        const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
                vkRT->compatibleRenderPassHandle();
        if (rpHandle.isValid()) {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        } else {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*vkRT,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        }

        SkASSERT(cbInfo.fRenderPass->isCompatible(*oldRP));
        oldRP->unref(fGpu);
        cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
        cbInfo.fLoadStoreState = LoadStoreState::kStartsWithDiscard;
        // If we are going to discard the whole render target then the results of any copies we
        // did immediately before to the target won't matter, so just drop them.
        cbInfo.fPreCopies.reset();
    }
}

void GrVkGpuRTCommandBuffer::insertEventMarker(const char* msg) {
    // TODO: does Vulkan have a correlate?
}

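// Clears the stencil clip inside the current render pass using clearAttachments. The
// clip bit is the high bit of the stencil value (1 << (stencilBitCount - 1)).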
void GrVkGpuRTCommandBuffer::onClearStencilClip(const GrFixedClip& clip, bool insideStencilMask) {
    SkASSERT(!clip.hasWindowRectangles());

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];

    GrStencilAttachment* sb = fRenderTarget->renderTargetPriv().getStencilAttachment();
    // This should only be called internally when we know we have a stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.

    VkClearDepthStencilValue vkStencilColor;
    memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
    if (insideStencilMask) {
        vkStencilColor.stencil = (1 << (stencilBitCount - 1));
    } else {
        vkStencilColor.stencil = 0;
    }

    VkClearRect clearRect;
    // Flip rect if necessary.
    SkIRect vkRect;
    if (!clip.scissorEnabled()) {
        vkRect.setXYWH(0, 0, fRenderTarget->width(), fRenderTarget->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = clip.scissorRect();
    } else {
        const SkIRect& scissor = clip.scissorRect();
        vkRect.setLTRB(scissor.fLeft, fRenderTarget->height() - scissor.fBottom,
                       scissor.fRight, fRenderTarget->height() - scissor.fTop);
    }

    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };

    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t stencilIndex;
    SkAssertResult(cbInfo.fRenderPass->stencilAttachmentIndex(&stencilIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    attachment.colorAttachment = 0;  // this value shouldn't matter
    attachment.clearValue.depthStencil = vkStencilColor;

    cbInfo.currentCmdBuf()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    cbInfo.fIsEmpty = false;

    // Update command buffer bounds.
    if (!clip.scissorEnabled()) {
        cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
    } else {
        cbInfo.fBounds.join(SkRect::Make(clip.scissorRect()));
    }
}

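// A full-target clear on an empty command buffer is folded into the render pass itself
// by switching to a VK_ATTACHMENT_LOAD_OP_CLEAR load op; scissored or later clears fall
// back to clearAttachments inside the pass.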
void GrVkGpuRTCommandBuffer::onClear(const GrFixedClip& clip, const SkPMColor4f& color) {
    // The parent class should never let us get here with no RT.
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);

    SkASSERT(!clip.hasWindowRectangles());

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];

    VkClearColorValue vkColor = {{color.fR, color.fG, color.fB, color.fA}};

    if (cbInfo.fIsEmpty && !clip.scissorEnabled()) {
        // Change the render pass to do a clear load.
        GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_CLEAR,
                                                VK_ATTACHMENT_STORE_OP_STORE);
        // Preserve the stencil buffer's load & store settings.
        GrVkRenderPass::LoadStoreOps vkStencilOps(fVkStencilLoadOp, fVkStencilStoreOp);

        const GrVkRenderPass* oldRP = cbInfo.fRenderPass;

        const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
                vkRT->compatibleRenderPassHandle();
        if (rpHandle.isValid()) {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        } else {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*vkRT,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        }

        SkASSERT(cbInfo.fRenderPass->isCompatible(*oldRP));
        oldRP->unref(fGpu);

        cbInfo.fColorClearValue.color = {{color.fR, color.fG, color.fB, color.fA}};
        cbInfo.fLoadStoreState = LoadStoreState::kStartsWithClear;
        // If we are going to clear the whole render target then the results of any copies we
        // did immediately before to the target won't matter, so just drop them.
        cbInfo.fPreCopies.reset();

        // Update command buffer bounds.
        cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
        return;
    }

    // We always do a sub rect clear with clearAttachments since we are inside a render pass.
    VkClearRect clearRect;
    // Flip rect if necessary.
    SkIRect vkRect;
    if (!clip.scissorEnabled()) {
        vkRect.setXYWH(0, 0, fRenderTarget->width(), fRenderTarget->height());
    } else if (kBottomLeft_GrSurfaceOrigin != fOrigin) {
        vkRect = clip.scissorRect();
    } else {
        const SkIRect& scissor = clip.scissorRect();
        vkRect.setLTRB(scissor.fLeft, fRenderTarget->height() - scissor.fBottom,
                       scissor.fRight, fRenderTarget->height() - scissor.fTop);
    }
    clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
    clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
    clearRect.baseArrayLayer = 0;
    clearRect.layerCount = 1;

    uint32_t colorIndex;
    SkAssertResult(cbInfo.fRenderPass->colorAttachmentIndex(&colorIndex));

    VkClearAttachment attachment;
    attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    attachment.colorAttachment = colorIndex;
    attachment.clearValue.color = vkColor;

    cbInfo.currentCmdBuf()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
    cbInfo.fIsEmpty = false;

    // Update command buffer bounds.
    if (!clip.scissorEnabled()) {
        cbInfo.fBounds.join(fRenderTarget->getBoundsRect());
    } else {
        cbInfo.fBounds.join(SkRect::Make(clip.scissorRect()));
    }
}

////////////////////////////////////////////////////////////////////////////////

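// Ends the current secondary command buffer and opens a fresh one within the same
// render pass. prepareDrawState() uses this when the driver wants a new command buffer
// per pipeline (GrVkCaps::newCBOnPipelineChange()).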
void GrVkGpuRTCommandBuffer::addAdditionalCommandBuffer() {
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    cbInfo.currentCmdBuf()->end(fGpu);
    cbInfo.fCommandBuffers.push_back(fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu));
    cbInfo.currentCmdBuf()->begin(fGpu, vkRT->framebuffer(), cbInfo.fRenderPass);
}

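// Ends the current render pass and starts a new one that loads and stores both
// attachments, so that work which must execute outside a render pass (copies, inline
// uploads) can be slotted between the two passes without losing earlier results.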
void GrVkGpuRTCommandBuffer::addAdditionalRenderPass() {
    GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);

    fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf()->end(fGpu);

    CommandBufferInfo& cbInfo = fCommandBufferInfos.push_back();
    fCurrentCmdInfo++;

    GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                            VK_ATTACHMENT_STORE_OP_STORE);
    GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                              VK_ATTACHMENT_STORE_OP_STORE);

    const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
            vkRT->compatibleRenderPassHandle();
    if (rpHandle.isValid()) {
        cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    } else {
        cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*vkRT,
                                                                     vkColorOps,
                                                                     vkStencilOps);
    }
    cbInfo.fLoadStoreState = LoadStoreState::kLoadAndStore;

    cbInfo.fCommandBuffers.push_back(fGpu->cmdPool()->findOrCreateSecondaryCommandBuffer(fGpu));
    // It shouldn't matter what we set the clear color to here since we will assume loading of
    // the attachment.
    memset(&cbInfo.fColorClearValue, 0, sizeof(VkClearValue));
    cbInfo.fBounds.setEmpty();

    cbInfo.currentCmdBuf()->begin(fGpu, vkRT->framebuffer(), cbInfo.fRenderPass);
}

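// Inline uploads are executed outside the render pass at submit() time, so if anything
// has already been recorded the pass is split and the upload is queued to run before
// the new pass.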
void GrVkGpuRTCommandBuffer::inlineUpload(GrOpFlushState* state,
                                          GrDeferredTextureUploadFn& upload) {
    if (!fCommandBufferInfos[fCurrentCmdInfo].fIsEmpty) {
        this->addAdditionalRenderPass();
    }
    fCommandBufferInfos[fCurrentCmdInfo].fPreDrawUploads.emplace_back(state, upload);
}

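// Copies into the render target are likewise queued to run before the current render
// pass; the pass is then promoted to load-and-store so the copy results survive it.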
void GrVkGpuRTCommandBuffer::copy(GrSurface* src, GrSurfaceOrigin srcOrigin, const SkIRect& srcRect,
                                  const SkIPoint& dstPoint) {
    if (!fCommandBufferInfos[fCurrentCmdInfo].fIsEmpty ||
        LoadStoreState::kStartsWithClear == fCommandBufferInfos[fCurrentCmdInfo].fLoadStoreState) {
        this->addAdditionalRenderPass();
    }
    // Fetch the current info only after the potential pass switch: addAdditionalRenderPass()
    // advances fCurrentCmdInfo and may reallocate fCommandBufferInfos, so an earlier reference
    // could be stale.
    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];

    cbInfo.fPreCopies.emplace_back(src, srcOrigin, srcRect, dstPoint,
                                   LoadStoreState::kStartsWithDiscard == cbInfo.fLoadStoreState);

    if (LoadStoreState::kLoadAndStore != cbInfo.fLoadStoreState) {
        // Change the render pass to do a load and store so we don't lose the results of our
        // copy.
        GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                                VK_ATTACHMENT_STORE_OP_STORE);
        GrVkRenderPass::LoadStoreOps vkStencilOps(VK_ATTACHMENT_LOAD_OP_LOAD,
                                                  VK_ATTACHMENT_STORE_OP_STORE);

        const GrVkRenderPass* oldRP = cbInfo.fRenderPass;

        GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(fRenderTarget);
        const GrVkResourceProvider::CompatibleRPHandle& rpHandle =
                vkRT->compatibleRenderPassHandle();
        if (rpHandle.isValid()) {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(rpHandle,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        } else {
            cbInfo.fRenderPass = fGpu->resourceProvider().findRenderPass(*vkRT,
                                                                         vkColorOps,
                                                                         vkStencilOps);
        }
        SkASSERT(cbInfo.fRenderPass->isCompatible(*oldRP));
        oldRP->unref(fGpu);

        cbInfo.fLoadStoreState = LoadStoreState::kLoadAndStore;
    }
}

////////////////////////////////////////////////////////////////////////////////

void GrVkGpuRTCommandBuffer::bindGeometry(const GrBuffer* indexBuffer,
                                          const GrBuffer* vertexBuffer,
                                          const GrBuffer* instanceBuffer) {
    GrVkSecondaryCommandBuffer* currCmdBuf = fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf();
    // There is no need to insert any memory barriers to make sure host writes have finished
    // here. When a command buffer is submitted to a queue, there is an implicit memory barrier
    // that occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside
    // of an active RenderPass.

    // Here our vertex and instance inputs need to match the same 0-based bindings they were
    // assigned in GrVkPipeline. That is, vertex first (if any) followed by instance.
    uint32_t binding = 0;

    if (vertexBuffer) {
        SkASSERT(!vertexBuffer->isCPUBacked());
        SkASSERT(!vertexBuffer->isMapped());

        currCmdBuf->bindInputBuffer(fGpu, binding++,
                                    static_cast<const GrVkVertexBuffer*>(vertexBuffer));
    }

    if (instanceBuffer) {
        SkASSERT(!instanceBuffer->isCPUBacked());
        SkASSERT(!instanceBuffer->isMapped());

        currCmdBuf->bindInputBuffer(fGpu, binding++,
                                    static_cast<const GrVkVertexBuffer*>(instanceBuffer));
    }

    if (indexBuffer) {
        SkASSERT(!indexBuffer->isMapped());
        SkASSERT(!indexBuffer->isCPUBacked());

        currCmdBuf->bindIndexBuffer(fGpu, static_cast<const GrVkIndexBuffer*>(indexBuffer));
    }
}

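// Resolves the GrVkPipelineState for a draw, binds the pipeline and uniforms, binds
// textures when they do not vary per mesh, and applies dynamic scissor, viewport, and
// blend-constant state. May start a new secondary command buffer on pipeline change.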
GrVkPipelineState* GrVkGpuRTCommandBuffer::prepareDrawState(
        const GrPrimitiveProcessor& primProc,
        const GrPipeline& pipeline,
        const GrPipeline::FixedDynamicState* fixedDynamicState,
        const GrPipeline::DynamicStateArrays* dynamicStateArrays,
        GrPrimitiveType primitiveType) {
    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    SkASSERT(cbInfo.fRenderPass);

    VkRenderPass compatibleRenderPass = cbInfo.fRenderPass->vkRenderPass();

    const GrTextureProxy* const* primProcProxies = nullptr;
    if (dynamicStateArrays && dynamicStateArrays->fPrimitiveProcessorTextures) {
        primProcProxies = dynamicStateArrays->fPrimitiveProcessorTextures;
    } else if (fixedDynamicState) {
        primProcProxies = fixedDynamicState->fPrimitiveProcessorTextures;
    }

    SkASSERT(SkToBool(primProcProxies) == SkToBool(primProc.numTextureSamplers()));

    GrVkPipelineState* pipelineState =
            fGpu->resourceProvider().findOrCreateCompatiblePipelineState(fRenderTarget, fOrigin,
                                                                         pipeline,
                                                                         primProc,
                                                                         primProcProxies,
                                                                         primitiveType,
                                                                         compatibleRenderPass);
    if (!pipelineState) {
        return pipelineState;
    }

    if (!cbInfo.fIsEmpty &&
        fLastPipelineState && fLastPipelineState != pipelineState &&
        fGpu->vkCaps().newCBOnPipelineChange()) {
        this->addAdditionalCommandBuffer();
    }
    fLastPipelineState = pipelineState;

    pipelineState->bindPipeline(fGpu, cbInfo.currentCmdBuf());

    pipelineState->setAndBindUniforms(fGpu, fRenderTarget, fOrigin,
                                      primProc, pipeline, cbInfo.currentCmdBuf());

    // Check whether we need to bind textures between each GrMesh. If not we can bind them all
    // now.
    bool setTextures = !(dynamicStateArrays && dynamicStateArrays->fPrimitiveProcessorTextures);
    if (setTextures) {
        pipelineState->setAndBindTextures(fGpu, primProc, pipeline, primProcProxies,
                                          cbInfo.currentCmdBuf());
    }

    if (!pipeline.isScissorEnabled()) {
        GrVkPipeline::SetDynamicScissorRectState(fGpu, cbInfo.currentCmdBuf(),
                                                 fRenderTarget, fOrigin,
                                                 SkIRect::MakeWH(fRenderTarget->width(),
                                                                 fRenderTarget->height()));
    } else if (!dynamicStateArrays || !dynamicStateArrays->fScissorRects) {
        SkASSERT(fixedDynamicState);
        GrVkPipeline::SetDynamicScissorRectState(fGpu, cbInfo.currentCmdBuf(), fRenderTarget,
                                                 fOrigin,
                                                 fixedDynamicState->fScissorRect);
    }
    GrVkPipeline::SetDynamicViewportState(fGpu, cbInfo.currentCmdBuf(), fRenderTarget);
    GrVkPipeline::SetDynamicBlendConstantState(fGpu, cbInfo.currentCmdBuf(),
                                               fRenderTarget->config(),
                                               pipeline.getXferProcessor());

    return pipelineState;
}

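// Records one draw: every sampled texture is prepared first (resolving MSAA and
// regenerating dirty mipmaps, which are done outside the render pass), then the
// pipeline state is built and each GrMesh is sent, re-preparing the state whenever the
// primitive type changes.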
void GrVkGpuRTCommandBuffer::onDraw(const GrPrimitiveProcessor& primProc,
                                    const GrPipeline& pipeline,
                                    const GrPipeline::FixedDynamicState* fixedDynamicState,
                                    const GrPipeline::DynamicStateArrays* dynamicStateArrays,
                                    const GrMesh meshes[],
                                    int meshCount,
                                    const SkRect& bounds) {
    if (!meshCount) {
        return;
    }

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];

    auto prepareSampledImage = [&](GrTexture* texture, GrSamplerState::Filter filter) {
        GrVkTexture* vkTexture = static_cast<GrVkTexture*>(texture);
        // We may need to resolve the texture first if it is also a render target.
        GrVkRenderTarget* texRT = static_cast<GrVkRenderTarget*>(vkTexture->asRenderTarget());
        if (texRT) {
            fGpu->resolveRenderTargetNoFlush(texRT);
        }

        // Check if we need to regenerate any mip maps.
        if (GrSamplerState::Filter::kMipMap == filter &&
            (vkTexture->width() != 1 || vkTexture->height() != 1)) {
            SkASSERT(vkTexture->texturePriv().mipMapped() == GrMipMapped::kYes);
            if (vkTexture->texturePriv().mipMapsAreDirty()) {
                fGpu->regenerateMipMapLevels(vkTexture);
            }
        }
        cbInfo.fSampledImages.push_back(vkTexture);
    };

    if (dynamicStateArrays && dynamicStateArrays->fPrimitiveProcessorTextures) {
        for (int m = 0, i = 0; m < meshCount; ++m) {
            for (int s = 0; s < primProc.numTextureSamplers(); ++s, ++i) {
                auto texture = dynamicStateArrays->fPrimitiveProcessorTextures[i]->peekTexture();
                prepareSampledImage(texture, primProc.textureSampler(s).samplerState().filter());
            }
        }
    } else {
        for (int i = 0; i < primProc.numTextureSamplers(); ++i) {
            auto texture = fixedDynamicState->fPrimitiveProcessorTextures[i]->peekTexture();
            prepareSampledImage(texture, primProc.textureSampler(i).samplerState().filter());
        }
    }
    GrFragmentProcessor::Iter iter(pipeline);
    while (const GrFragmentProcessor* fp = iter.next()) {
        for (int i = 0; i < fp->numTextureSamplers(); ++i) {
            const GrFragmentProcessor::TextureSampler& sampler = fp->textureSampler(i);
            prepareSampledImage(sampler.peekTexture(), sampler.samplerState().filter());
        }
    }
    if (GrTexture* dstTexture = pipeline.peekDstTexture()) {
        cbInfo.fSampledImages.push_back(static_cast<GrVkTexture*>(dstTexture));
    }

    GrPrimitiveType primitiveType = meshes[0].primitiveType();
    GrVkPipelineState* pipelineState = this->prepareDrawState(primProc, pipeline, fixedDynamicState,
                                                              dynamicStateArrays, primitiveType);
    if (!pipelineState) {
        return;
    }

    bool dynamicScissor =
            pipeline.isScissorEnabled() && dynamicStateArrays && dynamicStateArrays->fScissorRects;
    bool dynamicTextures = dynamicStateArrays && dynamicStateArrays->fPrimitiveProcessorTextures;

    for (int i = 0; i < meshCount; ++i) {
        const GrMesh& mesh = meshes[i];
        if (mesh.primitiveType() != primitiveType) {
            SkDEBUGCODE(pipelineState = nullptr);
            primitiveType = mesh.primitiveType();
            pipelineState = this->prepareDrawState(primProc, pipeline, fixedDynamicState,
                                                   dynamicStateArrays, primitiveType);
            if (!pipelineState) {
                return;
            }
        }

        if (dynamicScissor) {
            GrVkPipeline::SetDynamicScissorRectState(fGpu, cbInfo.currentCmdBuf(), fRenderTarget,
                                                     fOrigin,
                                                     dynamicStateArrays->fScissorRects[i]);
        }
        if (dynamicTextures) {
            GrTextureProxy* const* meshProxies = dynamicStateArrays->fPrimitiveProcessorTextures +
                                                 primProc.numTextureSamplers() * i;
            pipelineState->setAndBindTextures(fGpu, primProc, pipeline, meshProxies,
                                              cbInfo.currentCmdBuf());
        }
        SkASSERT(pipelineState);
        mesh.sendToGpu(this);
    }

    cbInfo.fBounds.join(bounds);
    cbInfo.fIsEmpty = false;
}

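// GrMesh::SendToGpuImpl callbacks, reached via mesh.sendToGpu(this) in onDraw(): bind
// the geometry buffers and issue the (indexed) instanced draw on the current secondary
// command buffer.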
void GrVkGpuRTCommandBuffer::sendInstancedMeshToGpu(GrPrimitiveType,
                                                    const GrBuffer* vertexBuffer,
                                                    int vertexCount,
                                                    int baseVertex,
                                                    const GrBuffer* instanceBuffer,
                                                    int instanceCount,
                                                    int baseInstance) {
    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    this->bindGeometry(nullptr, vertexBuffer, instanceBuffer);
    cbInfo.currentCmdBuf()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
}

void GrVkGpuRTCommandBuffer::sendIndexedInstancedMeshToGpu(GrPrimitiveType,
                                                           const GrBuffer* indexBuffer,
                                                           int indexCount,
                                                           int baseIndex,
                                                           const GrBuffer* vertexBuffer,
                                                           int baseVertex,
                                                           const GrBuffer* instanceBuffer,
                                                           int instanceCount,
                                                           int baseInstance,
                                                           GrPrimitiveRestart restart) {
    SkASSERT(restart == GrPrimitiveRestart::kNo);
    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    this->bindGeometry(indexBuffer, vertexBuffer, instanceBuffer);
    cbInfo.currentCmdBuf()->drawIndexed(fGpu, indexCount, instanceCount,
                                        baseIndex, baseVertex, baseInstance);
    fGpu->stats()->incNumDraws();
}

////////////////////////////////////////////////////////////////////////////////

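// Hands the current secondary command buffer to a client SkDrawable so it can record
// Vulkan commands directly. The drawable reports the region it touched through
// GrVkDrawableInfo::fDrawBounds; a zero extent is treated as covering the whole target.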
void GrVkGpuRTCommandBuffer::executeDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable) {
    GrVkRenderTarget* target = static_cast<GrVkRenderTarget*>(fRenderTarget);

    GrVkImage* targetImage = target->msaaImage() ? target->msaaImage() : target;

    CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
    VkRect2D bounds;
    bounds.offset = { 0, 0 };
    bounds.extent = { 0, 0 };

    GrVkDrawableInfo vkInfo;
    vkInfo.fSecondaryCommandBuffer = cbInfo.currentCmdBuf()->vkCommandBuffer();
    vkInfo.fCompatibleRenderPass = cbInfo.fRenderPass->vkRenderPass();
    SkAssertResult(cbInfo.fRenderPass->colorAttachmentIndex(&vkInfo.fColorAttachmentIndex));
    vkInfo.fFormat = targetImage->imageFormat();
    vkInfo.fDrawBounds = &bounds;

    GrBackendDrawableInfo info(vkInfo);

    // After we draw into the command buffer via the drawable, cached state we have may be
    // invalid.
    cbInfo.currentCmdBuf()->invalidateState();

    drawable->draw(info);
    fGpu->addDrawable(std::move(drawable));

    if (bounds.extent.width == 0 || bounds.extent.height == 0) {
        cbInfo.fBounds.join(target->getBoundsRect());
    } else {
        cbInfo.fBounds.join(SkRect::MakeXYWH(bounds.offset.x, bounds.offset.y,
                                             bounds.extent.width, bounds.extent.height));
    }
}