/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrVkImage.h"
#include "GrGpuResourcePriv.h"
#include "GrVkGpu.h"
#include "GrVkMemory.h"
#include "GrVkTexture.h"
#include "GrVkUtil.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)

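// Maps an image layout to the pipeline stage(s) whose writes must have completed before the
// image can safely leave that layout; used as the srcStageMask of the barrier built in
// setImageLayout() below. For example, leaving VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL waits
// on VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT.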
VkPipelineStageFlags GrVkImage::LayoutToPipelineSrcStageFlags(const VkImageLayout layout) {
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
        return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        return VK_PIPELINE_STAGE_HOST_BIT;
    } else if (VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    }

    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
}

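// Maps an image layout to the set of writes that may have occurred while the image was in
// that layout; used as the srcAccessMask of the barrier built in setImageLayout() below.
// Read-only layouts yield 0 since they leave no writes to make available.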
VkAccessFlags GrVkImage::LayoutToSrcAccessMask(const VkImageLayout layout) {
    // Currently we assume we will never be doing any explicit shader writes (this doesn't
    // include color attachment or depth/stencil writes), so we ignore
    // VK_ACCESS_SHADER_WRITE_BIT.

    // We can only directly access host memory if we are in the preinitialized or general
    // layout and the image is linear.
    // TODO: Add a check for linear tiling here so we are not always adding the host bits to
    //       general, and assert that we are only in preinitialized if we are linear.
    VkAccessFlags flags = 0;
    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
                VK_ACCESS_TRANSFER_WRITE_BIT |
                VK_ACCESS_TRANSFER_READ_BIT |
                VK_ACCESS_SHADER_READ_BIT |
                VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
        flags = VK_ACCESS_HOST_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout ||
               VK_IMAGE_LAYOUT_PRESENT_SRC_KHR == layout) {
        // There are no writes that need to be made available.
        flags = 0;
    }
    return flags;
}

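// Returns the image aspect(s) implied by the format: stencil-only, combined depth/stencil
// (e.g. VK_FORMAT_D24_UNORM_S8_UINT maps to both aspects), or color for every other
// supported format.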
VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
    switch (format) {
        case VK_FORMAT_S8_UINT:
            return VK_IMAGE_ASPECT_STENCIL_BIT;
        case VK_FORMAT_D24_UNORM_S8_UINT: // fallthrough
        case VK_FORMAT_D32_SFLOAT_S8_UINT:
            return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
        default:
            SkASSERT(GrVkFormatIsSupported(format));
            return VK_IMAGE_ASPECT_COLOR_BIT;
    }
}

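// Records a pipeline barrier that transitions this image from its currently tracked layout
// to 'newLayout', performs a queue family ownership transfer when needed, and updates the
// tracked layout. A typical call, e.g. to make the image a copy destination before an
// upload, might look like the following (illustrative values, not a prescribed usage):
//
//   image->setImageLayout(gpu,
//                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
//                         VK_ACCESS_TRANSFER_WRITE_BIT,
//                         VK_PIPELINE_STAGE_TRANSFER_BIT,
//                         false,    // byRegion
//                         false);   // releaseFamilyQueue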
void GrVkImage::setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout,
                               VkAccessFlags dstAccessMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion, bool releaseFamilyQueue) {
    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED != newLayout &&
             VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout);
    VkImageLayout currentLayout = this->currentLayout();

    if (releaseFamilyQueue && fInfo.fCurrentQueueFamily == fInitialQueueFamily &&
        newLayout == currentLayout) {
        // We never transferred the image to this queue and we are releasing it, so don't do
        // anything.
        return;
    }

    // If the old and new layout are the same and the layout is a read-only layout, there is
    // no need to put in a barrier.
    if (newLayout == currentLayout &&
        !releaseFamilyQueue &&
        (VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == currentLayout ||
         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == currentLayout)) {
        return;
    }

    VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(currentLayout);
    VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineSrcStageFlags(currentLayout);

    VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);

    uint32_t srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    uint32_t dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    if (fInfo.fCurrentQueueFamily != VK_QUEUE_FAMILY_IGNORED &&
        gpu->queueIndex() != fInfo.fCurrentQueueFamily) {
        // The image is still owned by its original queue family and we need to transfer it
        // into ours.
        SkASSERT(!releaseFamilyQueue);
        SkASSERT(fInfo.fCurrentQueueFamily == fInitialQueueFamily);

        srcQueueFamilyIndex = fInfo.fCurrentQueueFamily;
        dstQueueFamilyIndex = gpu->queueIndex();
        fInfo.fCurrentQueueFamily = gpu->queueIndex();
    } else if (releaseFamilyQueue) {
        // We are releasing the image so we must transfer the image back to its original queue
        // family.
        SkASSERT(fInfo.fCurrentQueueFamily == gpu->queueIndex());
        srcQueueFamilyIndex = fInfo.fCurrentQueueFamily;
        dstQueueFamilyIndex = fInitialQueueFamily;
        fInfo.fCurrentQueueFamily = fInitialQueueFamily;
    }

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,          // sType
        nullptr,                                         // pNext
        srcAccessMask,                                   // srcAccessMask
        dstAccessMask,                                   // dstAccessMask
        currentLayout,                                   // oldLayout
        newLayout,                                       // newLayout
        srcQueueFamilyIndex,                             // srcQueueFamilyIndex
        dstQueueFamilyIndex,                             // dstQueueFamilyIndex
        fInfo.fImage,                                    // image
        { aspectFlags, 0, fInfo.fLevelCount, 0, 1 }      // subresourceRange
    };

    gpu->addImageMemoryBarrier(this->resource(), srcStageMask, dstStageMask, byRegion,
                               &imageMemoryBarrier);

    this->updateImageLayout(newLayout);
}

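// Creates a VkImage described by 'imageDesc', allocates and binds memory for it, and fills
// out 'info' on success. The caller owns the result and must eventually clean up with
// DestroyImageInfo(). A minimal sketch of a caller (the field values are illustrative only):
//
//   GrVkImage::ImageDesc desc;
//   desc.fImageType   = VK_IMAGE_TYPE_2D;
//   desc.fFormat      = VK_FORMAT_R8G8B8A8_UNORM;
//   desc.fWidth       = 256;
//   desc.fHeight      = 256;
//   desc.fLevels      = 1;
//   desc.fSamples     = 1;
//   desc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
//   desc.fUsageFlags  = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
//   GrVkImageInfo info;
//   if (GrVkImage::InitImageInfo(gpu, desc, &info)) {
//       // ... use info ...
//       GrVkImage::DestroyImageInfo(gpu, &info);
//   }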
bool GrVkImage::InitImageInfo(const GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImageInfo* info) {
    if (0 == imageDesc.fWidth || 0 == imageDesc.fHeight) {
        return false;
    }
    VkImage image = VK_NULL_HANDLE;
    GrVkAlloc alloc;

    bool isLinear = VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling;
    VkImageLayout initialLayout = isLinear ? VK_IMAGE_LAYOUT_PREINITIALIZED
                                           : VK_IMAGE_LAYOUT_UNDEFINED;

    // Create the image.
    VkSampleCountFlagBits vkSamples;
    if (!GrSampleCountToVkSampleCount(imageDesc.fSamples, &vkSamples)) {
        return false;
    }

    SkASSERT(VK_IMAGE_TILING_OPTIMAL == imageDesc.fImageTiling ||
             VK_SAMPLE_COUNT_1_BIT == vkSamples);

    const VkImageCreateInfo imageCreateInfo = {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,         // sType
        nullptr,                                     // pNext
        0,                                           // VkImageCreateFlags
        imageDesc.fImageType,                        // VkImageType
        imageDesc.fFormat,                           // VkFormat
        { imageDesc.fWidth, imageDesc.fHeight, 1 },  // VkExtent3D
        imageDesc.fLevels,                           // mipLevels
        1,                                           // arrayLayers
        vkSamples,                                   // samples
        imageDesc.fImageTiling,                      // VkImageTiling
        imageDesc.fUsageFlags,                       // VkImageUsageFlags
        VK_SHARING_MODE_EXCLUSIVE,                   // VkSharingMode
        0,                                           // queueFamilyIndexCount
        nullptr,                                     // pQueueFamilyIndices
        initialLayout                                // initialLayout
    };

    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateImage(gpu->device(), &imageCreateInfo, nullptr,
                                                        &image));

    if (!GrVkMemory::AllocAndBindImageMemory(gpu, image, isLinear, &alloc)) {
        VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
        return false;
    }

    info->fImage = image;
    info->fAlloc = alloc;
    info->fImageTiling = imageDesc.fImageTiling;
    info->fImageLayout = initialLayout;
    info->fFormat = imageDesc.fFormat;
    info->fLevelCount = imageDesc.fLevels;
    info->fCurrentQueueFamily = VK_QUEUE_FAMILY_IGNORED;
    return true;
}

void GrVkImage::DestroyImageInfo(const GrVkGpu* gpu, GrVkImageInfo* info) {
    VK_CALL(gpu, DestroyImage(gpu->device(), info->fImage, nullptr));
    bool isLinear = VK_IMAGE_TILING_LINEAR == info->fImageTiling;
    GrVkMemory::FreeImageMemory(gpu, isLinear, info->fAlloc);
}

GrVkImage::~GrVkImage() {
    // Should have been released or abandoned first.
    SkASSERT(!fResource);
}

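// Prepares the image for presentation: when the image is owned by a regular (non-external,
// non-foreign) queue family and the device supports swapchains, it is transitioned to
// VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; in all cases it is released back to its original queue
// family.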
void GrVkImage::prepareForPresent(GrVkGpu* gpu) {
    VkImageLayout layout = this->currentLayout();
    if (fInitialQueueFamily != VK_QUEUE_FAMILY_EXTERNAL &&
        fInitialQueueFamily != VK_QUEUE_FAMILY_FOREIGN_EXT) {
        if (gpu->vkCaps().supportsSwapchain()) {
            layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
        }
    }
    this->setImageLayout(gpu, layout, 0, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, false, true);
}

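// Releases our hold on the image: transfers ownership back to the original queue family if
// we had acquired it from another one, then drops our ref on the backing resource.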
void GrVkImage::releaseImage(GrVkGpu* gpu) {
    if (fInfo.fCurrentQueueFamily != fInitialQueueFamily) {
        // The Vulkan spec is vague on what to put for the dstStageMask here. The spec for
        // image memory barriers says the dstStageMask must not be zero. However, where the
        // spec talks about queue family ownership transfers, it says the dstStageMask is
        // ignored and should be set to zero. Assuming it really is ignored, we set it to
        // VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT here since that keeps the Vulkan validation
        // layers happy.
        this->setImageLayout(gpu, this->currentLayout(), 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                             false, true);
    }
    if (fResource) {
        fResource->removeOwningTexture();
        fResource->unref(gpu);
        fResource = nullptr;
    }
}

void GrVkImage::abandonImage() {
    if (fResource) {
        fResource->removeOwningTexture();
        fResource->unrefAndAbandon();
        fResource = nullptr;
    }
}

void GrVkImage::setResourceRelease(sk_sp<GrRefCntedCallback> releaseHelper) {
    SkASSERT(fResource);
    // Forward the release proc on to GrVkImage::Resource.
    fResource->setRelease(std::move(releaseHelper));
}

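// For an owned Resource, freeing the GPU data invokes any pending release proc and then
// destroys the VkImage and frees its memory (contrast with BorrowedResource below).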
void GrVkImage::Resource::freeGPUData(GrVkGpu* gpu) const {
    this->invokeReleaseProc();
    VK_CALL(gpu, DestroyImage(gpu->device(), fImage, nullptr));
    bool isLinear = (VK_IMAGE_TILING_LINEAR == fImageTiling);
    GrVkMemory::FreeImageMemory(gpu, isLinear, fAlloc);
}

void GrVkImage::Resource::replaceIdleProc(
        GrVkTexture* owner, sk_sp<GrRefCntedCallback> idleCallback) const {
    fOwningTexture = owner;
    fIdleCallback = std::move(idleCallback);
}

void GrVkImage::Resource::removeOwningTexture() const { fOwningTexture = nullptr; }

void GrVkImage::Resource::notifyAddedToCommandBuffer() const { ++fNumCommandBufferOwners; }

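// Called when a command buffer drops its reference. Once no command buffers reference the
// resource, and the owning texture (if any) has no outstanding refs or pending IO, the idle
// callback is released; dropping the last ref to the GrRefCntedCallback is what fires the
// client's idle proc.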
void GrVkImage::Resource::notifyRemovedFromCommandBuffer() const {
    SkASSERT(fNumCommandBufferOwners);
    if (--fNumCommandBufferOwners || !fIdleCallback) {
        return;
    }
    if (fOwningTexture) {
        if (fOwningTexture->resourcePriv().hasRefOrPendingIO()) {
            return;
        }
        fOwningTexture->removeIdleProc();
    }
    fIdleCallback.reset();
}

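// A BorrowedResource wraps an image that Skia does not own (e.g. one wrapped from client
// code), so freeing or abandoning it only invokes the release proc; the VkImage and its
// memory are left untouched.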
void GrVkImage::BorrowedResource::freeGPUData(GrVkGpu* gpu) const {
    this->invokeReleaseProc();
}

void GrVkImage::BorrowedResource::abandonGPUData() const {
    this->invokeReleaseProc();
}

#if GR_TEST_UTILS
void GrVkImage::setCurrentQueueFamilyToGraphicsQueue(GrVkGpu* gpu) {
    fInfo.fCurrentQueueFamily = gpu->queueIndex();
}
#endif
