/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkCommandBuffer_DEFINED
#define GrVkCommandBuffer_DEFINED

#include "GrVkGpu.h"
#include "GrVkResource.h"
#include "GrVkSemaphore.h"
#include "GrVkUtil.h"
#include "vk/GrVkTypes.h"

class GrVkBuffer;
class GrVkCommandPool;
class GrVkFramebuffer;
class GrVkIndexBuffer;
class GrVkImage;
class GrVkPipeline;
class GrVkPipelineLayout;
class GrVkPipelineState;
class GrVkRenderPass;
class GrVkRenderTarget;
class GrVkTransferBuffer;
class GrVkVertexBuffer;

class GrVkCommandBuffer : public GrVkResource {
public:
    void invalidateState();

    ////////////////////////////////////////////////////////////////////////////
    // CommandBuffer commands
    ////////////////////////////////////////////////////////////////////////////
    enum BarrierType {
        kMemory_BarrierType,
        kBufferMemory_BarrierType,
        kImageMemory_BarrierType
    };

    void pipelineBarrier(const GrVkGpu* gpu,
                         const GrVkResource* resource,
                         VkPipelineStageFlags srcStageMask,
                         VkPipelineStageFlags dstStageMask,
                         bool byRegion,
                         BarrierType barrierType,
                         void* barrier);

    void bindInputBuffer(GrVkGpu* gpu, uint32_t binding, const GrVkVertexBuffer* vbuffer);

    void bindIndexBuffer(GrVkGpu* gpu, const GrVkIndexBuffer* ibuffer);

    void bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline);

    void bindDescriptorSets(const GrVkGpu* gpu,
                            GrVkPipelineState*,
                            GrVkPipelineLayout* layout,
                            uint32_t firstSet,
                            uint32_t setCount,
                            const VkDescriptorSet* descriptorSets,
                            uint32_t dynamicOffsetCount,
                            const uint32_t* dynamicOffsets);

    void bindDescriptorSets(const GrVkGpu* gpu,
                            const SkTArray<const GrVkRecycledResource*>&,
                            const SkTArray<const GrVkResource*>&,
                            GrVkPipelineLayout* layout,
                            uint32_t firstSet,
                            uint32_t setCount,
                            const VkDescriptorSet* descriptorSets,
                            uint32_t dynamicOffsetCount,
                            const uint32_t* dynamicOffsets);

    GrVkCommandPool* commandPool() { return fCmdPool; }

    void setViewport(const GrVkGpu* gpu,
                     uint32_t firstViewport,
                     uint32_t viewportCount,
                     const VkViewport* viewports);

    void setScissor(const GrVkGpu* gpu,
                    uint32_t firstScissor,
                    uint32_t scissorCount,
                    const VkRect2D* scissors);

    void setBlendConstants(const GrVkGpu* gpu, const float blendConstants[4]);

    // Commands that only work inside of a render pass
    void clearAttachments(const GrVkGpu* gpu,
                          int numAttachments,
                          const VkClearAttachment* attachments,
                          int numRects,
                          const VkClearRect* clearRects) const;

    void drawIndexed(const GrVkGpu* gpu,
                     uint32_t indexCount,
                     uint32_t instanceCount,
                     uint32_t firstIndex,
                     int32_t vertexOffset,
                     uint32_t firstInstance) const;

    void draw(const GrVkGpu* gpu,
              uint32_t vertexCount,
              uint32_t instanceCount,
              uint32_t firstVertex,
              uint32_t firstInstance) const;

    // Add ref-counted resource that will be tracked and released when this command buffer finishes
    // execution.
    void addResource(const GrVkResource* resource) {
        resource->ref();
        resource->notifyAddedToCommandBuffer();
        fTrackedResources.append(1, &resource);
    }
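    // Illustrative usage (a sketch, not part of this class): after recording a command that
    // reads or writes a Vulkan object, the caller hands the object's GrVkResource to the
    // command buffer so it stays alive until execution completes. The resource() accessor on
    // GrVkImage is assumed here for illustration.
    //
    //     void recordCopyThatSamplesImage(GrVkCommandBuffer* cb, GrVkImage* image) {
    //         // ... record the command that references the image ...
    //         cb->addResource(image->resource());  // ref dropped when the buffer finishes execution
    //     }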
    // Add ref-counted resource that will be tracked and released when this command buffer finishes
    // execution. When it is released, it will signal that the resource can be recycled for reuse.
    void addRecycledResource(const GrVkRecycledResource* resource) {
        resource->ref();
        resource->notifyAddedToCommandBuffer();
        fTrackedRecycledResources.append(1, &resource);
    }

    // Add ref-counted resource that will be tracked and released when this command buffer finishes
    // recording.
    void addRecordingResource(const GrVkResource* resource) {
        resource->ref();
        resource->notifyAddedToCommandBuffer();
        fTrackedRecordingResources.append(1, &resource);
    }

    void releaseResources(GrVkGpu* gpu);

protected:
    GrVkCommandBuffer(VkCommandBuffer cmdBuffer, GrVkCommandPool* cmdPool,
                      const GrVkRenderPass* rp = nullptr)
            : fIsActive(false)
            , fActiveRenderPass(rp)
            , fCmdBuffer(cmdBuffer)
            , fCmdPool(cmdPool)
            , fNumResets(0) {
        fTrackedResources.setReserve(kInitialTrackedResourcesCount);
        fTrackedRecycledResources.setReserve(kInitialTrackedResourcesCount);
        fTrackedRecordingResources.setReserve(kInitialTrackedResourcesCount);
        this->invalidateState();
    }

    bool isWrapped() const {
        return fCmdPool == nullptr;
    }

    SkTDArray<const GrVkResource*>         fTrackedResources;
    SkTDArray<const GrVkRecycledResource*> fTrackedRecycledResources;
    SkTDArray<const GrVkResource*>         fTrackedRecordingResources;

    // Tracks whether we are in the middle of command buffer begin/end calls and thus can add
    // new commands to the buffer.
    bool fIsActive;

    // Stores a pointer to the currently active render pass (i.e. begin has been called but not
    // end). A nullptr means there is no active render pass. The GrVkCommandBuffer does not own
    // the render pass.
    const GrVkRenderPass* fActiveRenderPass;

    VkCommandBuffer fCmdBuffer;

    // Raw pointer, not refcounted. The command pool controls the command buffer's lifespan, so
    // it's guaranteed to outlive us.
    GrVkCommandPool* fCmdPool;

private:
    static const int kInitialTrackedResourcesCount = 32;

    void freeGPUData(GrVkGpu* gpu) const final override;
    virtual void onFreeGPUData(GrVkGpu* gpu) const = 0;
    void abandonGPUData() const final override;
    virtual void onAbandonGPUData() const = 0;

    virtual void onReleaseResources(GrVkGpu* gpu) {}

    static constexpr uint32_t kMaxInputBuffers = 2;

    VkBuffer fBoundInputBuffers[kMaxInputBuffers];
    VkBuffer fBoundIndexBuffer;
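    // Sketch of how the cached buffer handles above are typically used (illustrative only; the
    // actual logic lives in GrVkCommandBuffer.cpp): bindInputBuffer() can compare the incoming
    // VkBuffer against fBoundInputBuffers[binding] and skip the redundant bind when the same
    // buffer is already bound, e.g.
    //
    //     VkBuffer vkBuffer = vbuffer->buffer();
    //     if (vkBuffer != fBoundInputBuffers[binding]) {
    //         VkDeviceSize offset = vbuffer->offset();
    //         GR_VK_CALL(gpu->vkInterface(),
    //                    CmdBindVertexBuffers(fCmdBuffer, binding, 1, &vkBuffer, &offset));
    //         fBoundInputBuffers[binding] = vkBuffer;
    //         this->addResource(vbuffer->resource());
    //     }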
    // When resetting the command buffer, we remove the tracked resources from their arrays, and
    // we prefer to not free all the memory every time so usually we just rewind. However, to
    // avoid all arrays growing to the max size, after so many resets we'll do a full reset of
    // the tracked resource arrays.
    static const int kNumRewindResetsBeforeFullReset = 8;
    int fNumResets;

    // Cached values used for dynamic state updates
    VkViewport fCachedViewport;
    VkRect2D   fCachedScissor;
    float      fCachedBlendConstant[4];

#ifdef SK_DEBUG
    mutable bool fResourcesReleased = false;
#endif
};

class GrVkSecondaryCommandBuffer;

class GrVkPrimaryCommandBuffer : public GrVkCommandBuffer {
public:
    ~GrVkPrimaryCommandBuffer() override;

    static GrVkPrimaryCommandBuffer* Create(const GrVkGpu* gpu, GrVkCommandPool* cmdPool);

    void begin(const GrVkGpu* gpu);
    void end(GrVkGpu* gpu);

    // Begins render pass on this command buffer. The framebuffer from GrVkRenderTarget will be
    // used in the render pass.
    void beginRenderPass(const GrVkGpu* gpu,
                         const GrVkRenderPass* renderPass,
                         const VkClearValue clearValues[],
                         const GrVkRenderTarget& target,
                         const SkIRect& bounds,
                         bool forSecondaryCB);
    void endRenderPass(const GrVkGpu* gpu);

    // Submits the SecondaryCommandBuffer into this command buffer. It is required that we are
    // currently inside a render pass that is compatible with the one used to create the
    // SecondaryCommandBuffer.
    void executeCommands(const GrVkGpu* gpu,
                         GrVkSecondaryCommandBuffer* secondaryBuffer);

    // Commands that only work outside of a render pass
    void clearColorImage(const GrVkGpu* gpu,
                         GrVkImage* image,
                         const VkClearColorValue* color,
                         uint32_t subRangeCount,
                         const VkImageSubresourceRange* subRanges);

    void clearDepthStencilImage(const GrVkGpu* gpu,
                                GrVkImage* image,
                                const VkClearDepthStencilValue* color,
                                uint32_t subRangeCount,
                                const VkImageSubresourceRange* subRanges);

    void copyImage(const GrVkGpu* gpu,
                   GrVkImage* srcImage,
                   VkImageLayout srcLayout,
                   GrVkImage* dstImage,
                   VkImageLayout dstLayout,
                   uint32_t copyRegionCount,
                   const VkImageCopy* copyRegions);

    void blitImage(const GrVkGpu* gpu,
                   const GrVkResource* srcResource,
                   VkImage srcImage,
                   VkImageLayout srcLayout,
                   const GrVkResource* dstResource,
                   VkImage dstImage,
                   VkImageLayout dstLayout,
                   uint32_t blitRegionCount,
                   const VkImageBlit* blitRegions,
                   VkFilter filter);

    void blitImage(const GrVkGpu* gpu,
                   const GrVkImage& srcImage,
                   const GrVkImage& dstImage,
                   uint32_t blitRegionCount,
                   const VkImageBlit* blitRegions,
                   VkFilter filter);

    void copyImageToBuffer(const GrVkGpu* gpu,
                           GrVkImage* srcImage,
                           VkImageLayout srcLayout,
                           GrVkTransferBuffer* dstBuffer,
                           uint32_t copyRegionCount,
                           const VkBufferImageCopy* copyRegions);

    void copyBufferToImage(const GrVkGpu* gpu,
                           GrVkTransferBuffer* srcBuffer,
                           GrVkImage* dstImage,
                           VkImageLayout dstLayout,
                           uint32_t copyRegionCount,
                           const VkBufferImageCopy* copyRegions);

    void copyBuffer(GrVkGpu* gpu,
                    GrVkBuffer* srcBuffer,
                    GrVkBuffer* dstBuffer,
                    uint32_t regionCount,
                    const VkBufferCopy* regions);

    void updateBuffer(GrVkGpu* gpu,
                      GrVkBuffer* dstBuffer,
                      VkDeviceSize dstOffset,
                      VkDeviceSize dataSize,
                      const void* data);

    void resolveImage(GrVkGpu* gpu,
                      const GrVkImage& srcImage,
                      const GrVkImage& dstImage,
                      uint32_t regionCount,
                      const VkImageResolve* regions);
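    // Illustrative recording/submission flow for a primary command buffer. This is only a
    // sketch of the call ordering implied by the declarations in this header; the local
    // variable names (gpu, queue, renderPass, clearValues, renderTarget, bounds, secondaryCB,
    // syncQueue, signalSemaphores, waitSemaphores) are placeholders supplied by the caller.
    //
    //     primaryCB->begin(gpu);
    //     primaryCB->beginRenderPass(gpu, renderPass, clearValues, *renderTarget, bounds,
    //                                /*forSecondaryCB=*/true);
    //     primaryCB->executeCommands(gpu, secondaryCB);  // must be render-pass compatible
    //     primaryCB->endRenderPass(gpu);
    //     primaryCB->end(gpu);
    //     primaryCB->submitToQueue(gpu, queue, syncQueue, signalSemaphores, waitSemaphores);
    //     // Later, poll primaryCB->finished(gpu) or rely on an addFinishedProc() callback.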
    void submitToQueue(const GrVkGpu* gpu, VkQueue queue, GrVkGpu::SyncQueue sync,
                       SkTArray<GrVkSemaphore::Resource*>& signalSemaphores,
                       SkTArray<GrVkSemaphore::Resource*>& waitSemaphores);

    bool finished(const GrVkGpu* gpu);

    void addFinishedProc(sk_sp<GrRefCntedCallback> finishedProc);

    void recycleSecondaryCommandBuffers();

#ifdef SK_TRACE_VK_RESOURCES
    void dumpInfo() const override {
        SkDebugf("GrVkPrimaryCommandBuffer: %d (%d refs)\n", fCmdBuffer, this->getRefCnt());
    }
#endif

private:
    explicit GrVkPrimaryCommandBuffer(VkCommandBuffer cmdBuffer, GrVkCommandPool* cmdPool)
            : INHERITED(cmdBuffer, cmdPool)
            , fSubmitFence(VK_NULL_HANDLE) {}

    void onFreeGPUData(GrVkGpu* gpu) const override;

    void onAbandonGPUData() const override;

    void onReleaseResources(GrVkGpu* gpu) override;

    SkTArray<GrVkSecondaryCommandBuffer*, true> fSecondaryCommandBuffers;
    VkFence                                     fSubmitFence;
    SkTArray<sk_sp<GrRefCntedCallback>>         fFinishedProcs;

    typedef GrVkCommandBuffer INHERITED;
};

class GrVkSecondaryCommandBuffer : public GrVkCommandBuffer {
public:
    static GrVkSecondaryCommandBuffer* Create(const GrVkGpu* gpu, GrVkCommandPool* cmdPool);
    // Used for wrapping an external secondary command buffer.
    static GrVkSecondaryCommandBuffer* Create(VkCommandBuffer externalSecondaryCB);

    void begin(const GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
               const GrVkRenderPass* compatibleRenderPass);
    void end(GrVkGpu* gpu);

    VkCommandBuffer vkCommandBuffer() { return fCmdBuffer; }

#ifdef SK_TRACE_VK_RESOURCES
    void dumpInfo() const override {
        SkDebugf("GrVkSecondaryCommandBuffer: %d (%d refs)\n", fCmdBuffer, this->getRefCnt());
    }
#endif

private:
    explicit GrVkSecondaryCommandBuffer(VkCommandBuffer cmdBuffer, GrVkCommandPool* cmdPool)
            : INHERITED(cmdBuffer, cmdPool) {}

    void onFreeGPUData(GrVkGpu* gpu) const override {}

    void onAbandonGPUData() const override {}

    friend class GrVkPrimaryCommandBuffer;

    typedef GrVkCommandBuffer INHERITED;
};

#endif