/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkGpu_DEFINED
#define GrVkGpu_DEFINED

#include "GrGpu.h"
#include "GrGpuFactory.h"
#include "vk/GrVkBackendContext.h"
#include "GrVkCaps.h"
#include "GrVkCopyManager.h"
#include "GrVkIndexBuffer.h"
#include "GrVkMemory.h"
#include "GrVkResourceProvider.h"
#include "GrVkSemaphore.h"
#include "GrVkVertexBuffer.h"
#include "GrVkUtil.h"
#include "vk/GrVkDefines.h"

class GrPipeline;
class GrNonInstancedMesh;

class GrVkBufferImpl;
class GrVkPipeline;
class GrVkPipelineState;
class GrVkPrimaryCommandBuffer;
class GrVkRenderPass;
class GrVkSecondaryCommandBuffer;
class GrVkTexture;
struct GrVkInterface;

namespace SkSL {
class Compiler;
}

class GrVkGpu : public GrGpu {
public:
    static GrGpu* Create(GrBackendContext backendContext, const GrContextOptions& options,
                         GrContext* context);

    ~GrVkGpu() override;

    const GrVkInterface* vkInterface() const { return fBackendContext->fInterface.get(); }
    const GrVkCaps& vkCaps() const { return *fVkCaps; }

    VkDevice device() const { return fDevice; }
    VkQueue queue() const { return fQueue; }
    VkCommandPool cmdPool() const { return fCmdPool; }
    VkPhysicalDeviceMemoryProperties physicalDeviceMemoryProperties() const {
        return fPhysDevMemProps;
    }

    GrVkResourceProvider& resourceProvider() { return fResourceProvider; }

    GrVkPrimaryCommandBuffer* currentCommandBuffer() { return fCurrentCmdBuffer; }

    // Controls whether submitCommandBuffer() blocks until all work submitted to the queue has
    // finished.
    enum SyncQueue {
        kForce_SyncQueue,
        kSkip_SyncQueue
    };

    bool onGetReadPixelsInfo(GrSurface* srcSurface, int readWidth, int readHeight, size_t rowBytes,
                             GrPixelConfig readConfig, DrawPreference*,
                             ReadPixelTempDrawInfo*) override;

    bool onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                              GrPixelConfig srcConfig, DrawPreference*,
                              WritePixelTempDrawInfo*) override;

    bool onCopySurface(GrSurface* dst,
                       GrSurface* src,
                       const SkIRect& srcRect,
                       const SkIPoint& dstPoint) override;

    void onQueryMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&,
                                 int* effectiveSampleCnt, SamplePattern*) override;

    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}

    GrBackendObject createTestingOnlyBackendTexture(void* pixels, int w, int h,
                                                    GrPixelConfig config,
                                                    bool isRenderTarget) override;
    bool isTestingOnlyBackendTexture(GrBackendObject id) const override;
    void deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandonTexture) override;

    GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget*,
                                                                int width,
                                                                int height) override;

    void clearStencil(GrRenderTarget* target) override;

    GrGpuCommandBuffer* createCommandBuffer(
            const GrGpuCommandBuffer::LoadAndStoreInfo& colorInfo,
            const GrGpuCommandBuffer::LoadAndStoreInfo& stencilInfo) override;

    void drawDebugWireRect(GrRenderTarget*, const SkIRect&, GrColor) override {}
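
    // The three add*MemoryBarrier() helpers below record a vkCmdPipelineBarrier into the
    // current command buffer. A minimal sketch of a call site, assuming an image that was
    // just written by a transfer and will next be sampled in a fragment shader ('gpu' is a
    // GrVkGpu* and 'image' a VkImage, both stand-ins; the masks and layouts are illustrative,
    // not prescribed by this class):
    //
    //   VkImageMemoryBarrier barrier;
    //   memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
    //   barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    //   barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    //   barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    //   barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    //   barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    //   barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    //   barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    //   barrier.image = image;
    //   barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
    //   gpu->addImageMemoryBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
    //                              VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
    //                              false /*byRegion*/, &barrier);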

    void addMemoryBarrier(VkPipelineStageFlags srcStageMask,
                          VkPipelineStageFlags dstStageMask,
                          bool byRegion,
                          VkMemoryBarrier* barrier) const;
    void addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                VkPipelineStageFlags dstStageMask,
                                bool byRegion,
                                VkBufferMemoryBarrier* barrier) const;
    void addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkImageMemoryBarrier* barrier) const;

    SkSL::Compiler* shaderCompiler() const {
        return fCompiler;
    }

    void onResolveRenderTarget(GrRenderTarget* target) override {
        this->internalResolveRenderTarget(target, true);
    }

    void submitSecondaryCommandBuffer(const SkTArray<GrVkSecondaryCommandBuffer*>&,
                                      const GrVkRenderPass*,
                                      const VkClearValue*,
                                      GrVkRenderTarget*,
                                      const SkIRect& bounds);

    void finishOpList() override;

    GrFence SK_WARN_UNUSED_RESULT insertFence() override;
    bool waitFence(GrFence, uint64_t timeout) override;
    void deleteFence(GrFence) const override;

    sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore() override;
    void insertSemaphore(sk_sp<GrSemaphore> semaphore) override;
    void waitSemaphore(sk_sp<GrSemaphore> semaphore) override;

    void flush() override;

    void generateMipmap(GrVkTexture* tex);

    bool updateBuffer(GrVkBuffer* buffer, const void* src, VkDeviceSize offset, VkDeviceSize size);

    // Heaps
    enum Heap {
        kLinearImage_Heap = 0,
        // We separate out small (i.e., <= 16K) images to reduce fragmentation
        // in the main heap.
        kOptimalImage_Heap,
        kSmallOptimalImage_Heap,
        // We have separate vertex and image heaps because a given Vulkan driver
        // may allocate them separately.
        kVertexBuffer_Heap,
        kIndexBuffer_Heap,
        kUniformBuffer_Heap,
        kCopyReadBuffer_Heap,
        kCopyWriteBuffer_Heap,

        kLastHeap = kCopyWriteBuffer_Heap
    };
    static const int kHeapCount = kLastHeap + 1;

    GrVkHeap* getHeap(Heap heap) const { return fHeaps[heap].get(); }
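
    // A minimal sketch of how a caller might pick between the two optimal-image heaps above,
    // using the 16K threshold from the comment on kSmallOptimalImage_Heap (the helper name
    // 'heapForOptimalImage' is hypothetical, not part of this class):
    //
    //   GrVkHeap* heapForOptimalImage(GrVkGpu* gpu, VkDeviceSize size) {
    //       GrVkGpu::Heap heap = (size <= 16 * 1024) ? GrVkGpu::kSmallOptimalImage_Heap
    //                                                : GrVkGpu::kOptimalImage_Heap;
    //       return gpu->getHeap(heap);
    //   }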

private:
    GrVkGpu(GrContext* context, const GrContextOptions& options,
            const GrVkBackendContext* backendContext);

    void onResetContext(uint32_t resetBits) override {}

    GrTexture* onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
                               const SkTArray<GrMipLevel>&) override;

    GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc, SkBudgeted,
                                         const SkTArray<GrMipLevel>&) override { return nullptr; }

    sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTextureDesc&, GrWrapOwnership) override;

    sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTargetDesc&) override;

    sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTextureDesc&) override {
        return nullptr;
    }

    GrBuffer* onCreateBuffer(size_t size, GrBufferType type, GrAccessPattern,
                             const void* data) override;

    gr_instanced::InstancedRendering* onCreateInstancedRendering() override { return nullptr; }

    bool onReadPixels(GrSurface* surface,
                      int left, int top, int width, int height,
                      GrPixelConfig,
                      void* buffer,
                      size_t rowBytes) override;

    bool onWritePixels(GrSurface* surface,
                       int left, int top, int width, int height,
                       GrPixelConfig config, const SkTArray<GrMipLevel>&) override;

    bool onTransferPixels(GrSurface*,
                          int left, int top, int width, int height,
                          GrPixelConfig config, GrBuffer* transferBuffer,
                          size_t offset, size_t rowBytes) override { return false; }

    // Ends and submits the current command buffer to the queue, then creates a new command
    // buffer and begins it. If sync is kForce_SyncQueue, the function waits for all work in
    // the queue to finish before returning. If signalSemaphore is non-null, we signal that
    // semaphore at the end of this command buffer. If this GrVkGpu object has any semaphores
    // in fSemaphoresToWaitOn, we add those wait semaphores to this command buffer when
    // submitting.
    void submitCommandBuffer(SyncQueue sync,
                             const GrVkSemaphore::Resource* signalSemaphore = nullptr);

    void internalResolveRenderTarget(GrRenderTarget* target, bool requiresSubmit);

    void copySurfaceAsCopyImage(GrSurface* dst,
                                GrSurface* src,
                                GrVkImage* dstImage,
                                GrVkImage* srcImage,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint);

    void copySurfaceAsBlit(GrSurface* dst,
                           GrSurface* src,
                           GrVkImage* dstImage,
                           GrVkImage* srcImage,
                           const SkIRect& srcRect,
                           const SkIPoint& dstPoint);

    void copySurfaceAsResolve(GrSurface* dst,
                              GrSurface* src,
                              const SkIRect& srcRect,
                              const SkIPoint& dstPoint);

    // Helpers for onCreateTexture and writeTexturePixels.
    bool uploadTexDataLinear(GrVkTexture* tex,
                             int left, int top, int width, int height,
                             GrPixelConfig dataConfig,
                             const void* data,
                             size_t rowBytes);
    bool uploadTexDataOptimal(GrVkTexture* tex,
                              int left, int top, int width, int height,
                              GrPixelConfig dataConfig,
                              const SkTArray<GrMipLevel>&);

    void resolveImage(GrVkRenderTarget* dst,
                      GrVkRenderTarget* src,
                      const SkIRect& srcRect,
                      const SkIPoint& dstPoint);

    sk_sp<const GrVkBackendContext> fBackendContext;
    sk_sp<GrVkCaps>                 fVkCaps;

    // These Vulkan objects are provided by the client and are also stored in fBackendContext.
    // They're copied here for convenient access.
    VkDevice                        fDevice;
    VkQueue                         fQueue;  // Must be a graphics queue.

    // Created by GrVkGpu
    GrVkResourceProvider            fResourceProvider;
    VkCommandPool                   fCmdPool;

    GrVkPrimaryCommandBuffer*       fCurrentCmdBuffer;

    SkSTArray<1, const GrVkSemaphore::Resource*> fSemaphoresToWaitOn;

    VkPhysicalDeviceMemoryProperties fPhysDevMemProps;

    std::unique_ptr<GrVkHeap>       fHeaps[kHeapCount];

    GrVkCopyManager                 fCopyManager;

#ifdef SK_ENABLE_VK_LAYERS
    // For reporting validation layer errors.
    VkDebugReportCallbackEXT        fCallback;
#endif

    // Compiler used for compiling SkSL into SPIR-V. We only want to create the compiler once,
    // since there is significant overhead to the first compile of any compiler.
    SkSL::Compiler*                 fCompiler;

    typedef GrGpu INHERITED;
};

#endif