/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkGpu_DEFINED
#define GrVkGpu_DEFINED

#include "GrGpu.h"
#include "GrVkCaps.h"
#include "GrVkCopyManager.h"
#include "GrVkIndexBuffer.h"
#include "GrVkMemory.h"
#include "GrVkResourceProvider.h"
#include "GrVkSemaphore.h"
#include "GrVkVertexBuffer.h"
#include "GrVkUtil.h"
#include "vk/GrVkBackendContext.h"
#include "vk/GrVkTypes.h"

class GrPipeline;

class GrVkBufferImpl;
class GrVkCommandPool;
class GrVkGpuRTCommandBuffer;
class GrVkGpuTextureCommandBuffer;
class GrVkMemoryAllocator;
class GrVkPipeline;
class GrVkPipelineState;
class GrVkPrimaryCommandBuffer;
class GrVkRenderPass;
class GrVkSecondaryCommandBuffer;
class GrVkTexture;
struct GrVkInterface;

namespace SkSL {
class Compiler;
}

/**
 * Vulkan implementation of the GrGpu backend abstraction. Owns the core Vulkan
 * handles used by the rest of the Vk backend (device, queue, command pool, the
 * current primary command buffer) and provides accessors for them, along with
 * the resource provider, caps, and memory allocator shared by Vk-backend objects.
 */
class GrVkGpu : public GrGpu {
public:
    // Factory; returns null on failure (assumption based on sk_sp return — the
    // failure contract lives in the .cpp, TODO confirm).
    static sk_sp<GrGpu> Make(const GrVkBackendContext&, const GrContextOptions&, GrContext*);

    ~GrVkGpu() override;

    void disconnect(DisconnectType) override;

    // ---- Accessors for the backend objects this GrVkGpu holds. The returned
    // pointers/references are non-owning views into this object's state. ----

    const GrVkInterface* vkInterface() const { return fInterface.get(); }
    const GrVkCaps& vkCaps() const { return *fVkCaps; }

    GrVkMemoryAllocator* memoryAllocator() const { return fMemoryAllocator.get(); }

    VkPhysicalDevice physicalDevice() const { return fPhysicalDevice; }
    VkDevice device() const { return fDevice; }
    VkQueue queue() const { return fQueue; }
    uint32_t queueIndex() const { return fQueueIndex; }
    GrVkCommandPool* cmdPool() const { return fCmdPool; }
    const VkPhysicalDeviceProperties& physicalDeviceProperties() const {
        return fPhysDevProps;
    }
    const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProperties() const {
        return fPhysDevMemProps;
    }

    GrVkResourceProvider& resourceProvider() { return fResourceProvider; }

    // The primary command buffer commands are currently being recorded into;
    // replaced each time submitCommandBuffer() is called (see comment there).
    GrVkPrimaryCommandBuffer* currentCommandBuffer() { return fCurrentCmdBuffer; }

    // Controls whether submitCommandBuffer() waits for the queue to drain.
    enum SyncQueue {
        kForce_SyncQueue,
        kSkip_SyncQueue
    };

    // No-op: Vulkan transfer barriers are handled elsewhere in this backend
    // (see addMemoryBarrier and friends below).
    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}

#if GR_TEST_UTILS
    GrBackendTexture createTestingOnlyBackendTexture(const void* pixels, int w, int h,
                                                     GrColorType colorType, bool isRenderTarget,
                                                     GrMipMapped, size_t rowBytes = 0) override;
    bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;
    void deleteTestingOnlyBackendTexture(const GrBackendTexture&) override;

    GrBackendRenderTarget createTestingOnlyBackendRenderTarget(int w, int h, GrColorType) override;
    void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override;

    void testingOnly_flushGpuAndSync() override;
#endif

    GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget*,
                                                                int width,
                                                                int height) override;

    GrGpuRTCommandBuffer* getCommandBuffer(
            GrRenderTarget*, GrSurfaceOrigin, const SkRect&,
            const GrGpuRTCommandBuffer::LoadAndStoreInfo&,
            const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo&) override;

    GrGpuTextureCommandBuffer* getCommandBuffer(GrTexture*, GrSurfaceOrigin) override;

    // ---- Barrier insertion helpers. Each records the given barrier into the
    // current command buffer (recording only; nothing is submitted here —
    // TODO confirm against the .cpp). ----

    void addMemoryBarrier(VkPipelineStageFlags srcStageMask,
                          VkPipelineStageFlags dstStageMask,
                          bool byRegion,
                          VkMemoryBarrier* barrier) const;
    void addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                VkPipelineStageFlags dstStageMask,
                                bool byRegion,
                                VkBufferMemoryBarrier* barrier) const;
    void addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkImageMemoryBarrier* barrier) const;

    // Shared SkSL->SPIR-V compiler; created once because the first compile of
    // any compiler carries significant overhead (see fCompiler below).
    SkSL::Compiler* shaderCompiler() const {
        return fCompiler;
    }

    bool onRegenerateMipMapLevels(GrTexture* tex) override;

    // Resolve without forcing a command-buffer submit afterwards.
    void resolveRenderTargetNoFlush(GrRenderTarget* target) {
        this->internalResolveRenderTarget(target, false);
    }

    void onResolveRenderTarget(GrRenderTarget* target) override {
        // This resolve is called when we are preparing an msaa surface for external I/O. It is
        // called after flushing, so we need to make sure we submit the command buffer after doing
        // the resolve so that the resolve actually happens.
        this->internalResolveRenderTarget(target, true);
    }

    void submitSecondaryCommandBuffer(const SkTArray<GrVkSecondaryCommandBuffer*>&,
                                      const GrVkRenderPass*,
                                      const VkClearValue* colorClear,
                                      GrVkRenderTarget*, GrSurfaceOrigin,
                                      const SkIRect& bounds);

    void submit(GrGpuCommandBuffer*) override;

    GrFence SK_WARN_UNUSED_RESULT insertFence() override;
    bool waitFence(GrFence, uint64_t timeout) override;
    void deleteFence(GrFence) const override;

    sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned) override;
    sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
                                            GrResourceProvider::SemaphoreWrapType wrapType,
                                            GrWrapOwnership ownership) override;
    void insertSemaphore(sk_sp<GrSemaphore> semaphore) override;
    void waitSemaphore(sk_sp<GrSemaphore> semaphore) override;

    // These match the definitions in SkDrawable, from whence they came
    typedef void* SubmitContext;
    typedef void (*SubmitProc)(SubmitContext submitContext);

    // Adds an SkDrawable::GpuDrawHandler that we will delete the next time we submit the primary
    // command buffer to the gpu.
    void addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable);

    sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;

    // ---- Raw buffer helpers used by the upload/transfer paths. ----

    void copyBuffer(GrVkBuffer* srcBuffer, GrVkBuffer* dstBuffer, VkDeviceSize srcOffset,
                    VkDeviceSize dstOffset, VkDeviceSize size);
    bool updateBuffer(GrVkBuffer* buffer, const void* src, VkDeviceSize offset, VkDeviceSize size);

    uint32_t getExtraSamplerKeyForProgram(const GrSamplerState&,
                                          const GrBackendFormat& format) override;

    // Discriminates the kinds of blobs stored in the persistent cache so a
    // shader entry and the pipeline-cache entry cannot collide.
    enum PersistentCacheKeyType : uint32_t {
        kShader_PersistentCacheKeyType = 0,
        kPipelineCache_PersistentCacheKeyType = 1,
    };

    void storeVkPipelineCacheData() override;

private:
    GrVkGpu(GrContext*, const GrContextOptions&, const GrVkBackendContext&,
            sk_sp<const GrVkInterface>, uint32_t instanceVersion, uint32_t physicalDeviceVersion);

    // Nothing to reset for the Vulkan backend.
    void onResetContext(uint32_t resetBits) override {}

    void destroyResources();

    // ---- GrGpu virtual hooks: resource creation / wrapping. ----

    sk_sp<GrTexture> onCreateTexture(const GrSurfaceDesc&, SkBudgeted, const GrMipLevel[],
                                     int mipLevelCount) override;

    sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&, GrWrapOwnership, GrWrapCacheable,
                                          GrIOType) override;
    sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                    int sampleCnt,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) override;

    sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
                                                             int sampleCnt) override;

    sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                                const GrVkDrawableInfo&) override;

    sk_sp<GrBuffer> onCreateBuffer(size_t size, GrBufferType type, GrAccessPattern,
                                   const void* data) override;

    // ---- GrGpu virtual hooks: pixel transfer and copies. ----

    bool onReadPixels(GrSurface* surface, int left, int top, int width, int height, GrColorType,
                      void* buffer, size_t rowBytes) override;

    bool onWritePixels(GrSurface* surface, int left, int top, int width, int height, GrColorType,
                       const GrMipLevel texels[], int mipLevelCount) override;

    bool onTransferPixels(GrTexture*, int left, int top, int width, int height, GrColorType,
                          GrBuffer* transferBuffer, size_t offset, size_t rowBytes) override;

    bool onCopySurface(GrSurface* dst, GrSurfaceOrigin dstOrigin, GrSurface* src,
                       GrSurfaceOrigin srcOrigin, const SkIRect& srcRect,
                       const SkIPoint& dstPoint, bool canDiscardOutsideDstRect) override;

    void onFinishFlush(bool insertedSemaphores) override;

    // Ends and submits the current command buffer to the queue and then creates a new command
    // buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all
    // work in the queue to finish before returning. If this GrVkGpu object has any semaphores in
    // fSemaphoreToSignal, we will add those signal semaphores to the submission of this command
    // buffer. If this GrVkGpu object has any semaphores in fSemaphoresToWaitOn, we will add those
    // wait semaphores to the submission of this command buffer.
    void submitCommandBuffer(SyncQueue sync);

    void internalResolveRenderTarget(GrRenderTarget*, bool requiresSubmit);

    // ---- Copy strategies tried by onCopySurface; which one applies depends on
    // the src/dst formats and sample counts (logic lives in the .cpp). ----

    void copySurfaceAsCopyImage(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                                GrSurface* src, GrSurfaceOrigin srcOrigin,
                                GrVkImage* dstImage, GrVkImage* srcImage,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint);

    void copySurfaceAsBlit(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                           GrSurface* src, GrSurfaceOrigin srcOrigin,
                           GrVkImage* dstImage, GrVkImage* srcImage,
                           const SkIRect& srcRect,
                           const SkIPoint& dstPoint);

    void copySurfaceAsResolve(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                              GrSurface* src, GrSurfaceOrigin srcOrigin,
                              const SkIRect& srcRect,
                              const SkIPoint& dstPoint);

    // helpers for onCreateTexture and writeTexturePixels
    bool uploadTexDataLinear(GrVkTexture* tex, int left, int top, int width, int height,
                             GrColorType colorType, const void* data, size_t rowBytes);
    bool uploadTexDataOptimal(GrVkTexture* tex, int left, int top, int width, int height,
                              GrColorType colorType, const GrMipLevel texels[], int mipLevelCount);
    bool uploadTexDataCompressed(GrVkTexture* tex, int left, int top, int width, int height,
                                 GrColorType dataColorType, const GrMipLevel texels[],
                                 int mipLevelCount);
    void resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                      const SkIPoint& dstPoint);

#if GR_TEST_UTILS
    bool createTestingOnlyVkImage(GrPixelConfig config, int w, int h, bool texturable,
                                  bool renderable, GrMipMapped mipMapped, const void* srcData,
                                  size_t srcRowBytes, GrVkImageInfo* info);
#endif

    sk_sp<const GrVkInterface>                  fInterface;
    sk_sp<GrVkMemoryAllocator>                  fMemoryAllocator;
    sk_sp<GrVkCaps>                             fVkCaps;

    // Raw Vulkan handles. Ownership/destruction is handled in the .cpp
    // (destroyResources) — TODO confirm which of these this object destroys.
    VkInstance                                  fInstance;
    VkPhysicalDevice                            fPhysicalDevice;
    VkDevice                                    fDevice;
    VkQueue                                     fQueue;    // Must be Graphics queue
    uint32_t                                    fQueueIndex;

    // Created by GrVkGpu
    GrVkResourceProvider                        fResourceProvider;

    GrVkCommandPool*                            fCmdPool;

    // just a raw pointer; object's lifespan is managed by fCmdPool
    GrVkPrimaryCommandBuffer*                   fCurrentCmdBuffer;

    // Semaphores attached to the next command-buffer submission (see
    // submitCommandBuffer comment above).
    SkSTArray<1, GrVkSemaphore::Resource*>      fSemaphoresToWaitOn;
    SkSTArray<1, GrVkSemaphore::Resource*>      fSemaphoresToSignal;

    // Drawable handlers queued by addDrawable(); deleted at the next primary
    // command-buffer submission.
    SkTArray<std::unique_ptr<SkDrawable::GpuDrawHandler>> fDrawables;

    VkPhysicalDeviceProperties                  fPhysDevProps;
    VkPhysicalDeviceMemoryProperties            fPhysDevMemProps;

    GrVkCopyManager                             fCopyManager;

    // compiler used for compiling sksl into spirv. We only want to create the compiler once since
    // there is significant overhead to the first compile of any compiler.
    SkSL::Compiler*                             fCompiler;

    // We need a bool to track whether or not we've already disconnected all the gpu resources from
    // vulkan context.
    bool                                        fDisconnected;

    // Reused command-buffer wrappers handed out by the getCommandBuffer overloads.
    std::unique_ptr<GrVkGpuRTCommandBuffer>     fCachedRTCommandBuffer;
    std::unique_ptr<GrVkGpuTextureCommandBuffer> fCachedTexCommandBuffer;

    typedef GrGpu INHERITED;
};

#endif