1 //
2 // Copyright 2018 The ANGLE Project Authors. All rights reserved.
3 // Use of this source code is governed by a BSD-style license that can be
4 // found in the LICENSE file.
5 //
6 // vk_helpers:
7 //   Helper utilitiy classes that manage Vulkan resources.
8 
9 #include "libANGLE/renderer/vulkan/vk_helpers.h"
10 #include "libANGLE/renderer/driver_utils.h"
11 
12 #include "common/utilities.h"
13 #include "image_util/loadimage.h"
14 #include "libANGLE/Context.h"
15 #include "libANGLE/renderer/renderer_utils.h"
16 #include "libANGLE/renderer/vulkan/BufferVk.h"
17 #include "libANGLE/renderer/vulkan/ContextVk.h"
18 #include "libANGLE/renderer/vulkan/DisplayVk.h"
19 #include "libANGLE/renderer/vulkan/FramebufferVk.h"
20 #include "libANGLE/renderer/vulkan/RenderTargetVk.h"
21 #include "libANGLE/renderer/vulkan/RendererVk.h"
22 #include "libANGLE/renderer/vulkan/android/vk_android_utils.h"
23 #include "libANGLE/renderer/vulkan/vk_utils.h"
24 #include "libANGLE/trace.h"
25 
26 namespace rx
27 {
28 namespace vk
29 {
30 namespace
31 {
// ANGLE_robust_resource_initialization requires color textures to be initialized to zero.
constexpr VkClearColorValue kRobustInitColorValue = {{0, 0, 0, 0}};
// When emulating a texture, we want the emulated channels to be 0, with alpha 1.
constexpr VkClearColorValue kEmulatedInitColorValue = {{0, 0, 0, 1.0f}};
// ANGLE_robust_resource_initialization requires depth to be initialized to 1 and stencil to 0.
// We are fine with these values for emulated depth/stencil textures too.
constexpr VkClearDepthStencilValue kRobustInitDepthStencilValue = {1.0f, 0};

// Mask containing both the depth and the stencil aspect bits.
constexpr VkImageAspectFlags kDepthStencilAspects =
    VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_DEPTH_BIT;

// Usage flags and initial sizes for the dynamic buffers that hold generated line loop indices
// and the matching indirect draw parameters.  The buffers are written by transfer commands and
// compute shaders, hence the TRANSFER_DST and STORAGE bits.
constexpr VkBufferUsageFlags kLineLoopDynamicBufferUsage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
                                                           VK_BUFFER_USAGE_TRANSFER_DST_BIT |
                                                           VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
constexpr int kLineLoopDynamicBufferInitialSize = 1024 * 1024;
constexpr VkBufferUsageFlags kLineLoopDynamicIndirectBufferUsage =
    VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
    VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
constexpr int kLineLoopDynamicIndirectBufferInitialSize = sizeof(VkDrawIndirectCommand) * 16;

// Maps each internal PipelineStage to the corresponding single Vulkan pipeline stage flag bit.
constexpr angle::PackedEnumMap<PipelineStage, VkPipelineStageFlagBits> kPipelineStageFlagBitMap = {
    {PipelineStage::TopOfPipe, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT},
    {PipelineStage::DrawIndirect, VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT},
    {PipelineStage::VertexInput, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT},
    {PipelineStage::VertexShader, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT},
    {PipelineStage::GeometryShader, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT},
    {PipelineStage::TransformFeedback, VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT},
    {PipelineStage::EarlyFragmentTest, VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT},
    {PipelineStage::FragmentShader, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT},
    {PipelineStage::LateFragmentTest, VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT},
    {PipelineStage::ColorAttachmentOutput, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT},
    {PipelineStage::ComputeShader, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT},
    {PipelineStage::Transfer, VK_PIPELINE_STAGE_TRANSFER_BIT},
    {PipelineStage::BottomOfPipe, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT},
    {PipelineStage::Host, VK_PIPELINE_STAGE_HOST_BIT}};

// Maps each shader type to the pipeline stage in which that shader executes.
constexpr gl::ShaderMap<PipelineStage> kPipelineStageShaderMap = {
    {gl::ShaderType::Vertex, PipelineStage::VertexShader},
    {gl::ShaderType::Fragment, PipelineStage::FragmentShader},
    {gl::ShaderType::Geometry, PipelineStage::GeometryShader},
    {gl::ShaderType::Compute, PipelineStage::ComputeShader},
};

// Default page size (16 KB) for pool allocators.
constexpr size_t kDefaultPoolAllocatorPageSize = 16 * 1024;
76 
// Per-ImageLayout description of how to build the VkImageMemoryBarrier that transitions an image
// into (dst*) or out of (src*) that layout.  One instance per ImageLayout lives in
// kImageMemoryBarrierData below.
struct ImageMemoryBarrierData
{
    // Human-readable name matching the ImageLayout key (fixed-size array so the containing map
    // can stay constexpr); presumably used for debug output — verify at call sites.
    char name[44];

    // The Vk layout corresponding to the ImageLayout key.
    VkImageLayout layout;

    // The stage in which the image is used (or Bottom/Top if not using any specific stage).  Unless
    // Bottom/Top (Bottom used for transition to and Top used for transition from), the two values
    // should match.
    VkPipelineStageFlags dstStageMask;
    VkPipelineStageFlags srcStageMask;
    // Access mask when transitioning into this layout.
    VkAccessFlags dstAccessMask;
    // Access mask when transitioning out from this layout.  Note that source access mask never
    // needs a READ bit, as WAR hazards don't need memory barriers (just execution barriers).
    VkAccessFlags srcAccessMask;
    // Read or write.
    ResourceAccess type;
    // CommandBufferHelper tracks an array of PipelineBarriers. This indicates which array element
    // this should be merged into. Right now we track individual barrier for every PipelineStage. If
    // layout has a single stage mask bit, we use that stage as index. If layout has multiple stage
    // mask bits, we pick the lowest stage as the index since it is the first stage that needs
    // barrier.
    PipelineStage barrierIndex;
};
103 
// All graphics shader stages that can execute before the fragment shader.
constexpr VkPipelineStageFlags kPreFragmentStageFlags =
    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
    VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT;

// Every shader stage: all graphics shaders plus compute.
constexpr VkPipelineStageFlags kAllShadersPipelineStageFlags =
    kPreFragmentStageFlags | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
    VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;

// The stages in which depth/stencil attachment accesses happen.
constexpr VkPipelineStageFlags kAllDepthStencilPipelineStageFlags =
    VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
114 
// clang-format off
// For every ImageLayout, the Vulkan layout, the source/destination pipeline stages and access
// masks used when transitioning out of / into that layout, whether the layout implies write
// access, and the PipelineBarrier array index the resulting barrier is merged into.  See
// ImageMemoryBarrierData above for the field-by-field meaning.
constexpr angle::PackedEnumMap<ImageLayout, ImageMemoryBarrierData> kImageMemoryBarrierData = {
    {
        ImageLayout::Undefined,
        ImageMemoryBarrierData{
            "Undefined",
            VK_IMAGE_LAYOUT_UNDEFINED,
            VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
            VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
            // Transition to: we don't expect to transition into Undefined.
            0,
            // Transition from: there's no data in the image to care about.
            0,
            ResourceAccess::ReadOnly,
            PipelineStage::InvalidEnum,
        },
    },
    {
        ImageLayout::ColorAttachment,
        ImageMemoryBarrierData{
            "ColorAttachment",
            VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
            // Transition to: all reads and writes must happen after barrier.
            VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
            // Transition from: all writes must finish before barrier.
            VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
            ResourceAccess::Write,
            PipelineStage::ColorAttachmentOutput,
        },
    },
    {
        ImageLayout::ColorAttachmentAndFragmentShaderRead,
        ImageMemoryBarrierData{
            "ColorAttachmentAndFragmentShaderRead",
            VK_IMAGE_LAYOUT_GENERAL,
            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
            // Transition to: all reads and writes must happen after barrier.
            VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT,
            // Transition from: all writes must finish before barrier.
            VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
            ResourceAccess::Write,
            PipelineStage::FragmentShader,
        },
    },
    {
        ImageLayout::ColorAttachmentAndAllShadersRead,
        ImageMemoryBarrierData{
            "ColorAttachmentAndAllShadersRead",
            VK_IMAGE_LAYOUT_GENERAL,
            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | kAllShadersPipelineStageFlags,
            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | kAllShadersPipelineStageFlags,
            // Transition to: all reads and writes must happen after barrier.
            VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT,
            // Transition from: all writes must finish before barrier.
            VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
            ResourceAccess::Write,
            // In case of multiple destination stages, We barrier the earliest stage
            PipelineStage::VertexShader,
        },
    },
    {
        ImageLayout::DSAttachmentWriteAndFragmentShaderRead,
        ImageMemoryBarrierData{
            "DSAttachmentWriteAndFragmentShaderRead",
            VK_IMAGE_LAYOUT_GENERAL,
            kAllDepthStencilPipelineStageFlags | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
            kAllDepthStencilPipelineStageFlags | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
            // Transition to: all reads and writes must happen after barrier.
            VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT,
            // Transition from: all writes must finish before barrier.
            VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
            ResourceAccess::Write,
            PipelineStage::FragmentShader,
        },
    },
    {
        ImageLayout::DSAttachmentWriteAndAllShadersRead,
        ImageMemoryBarrierData{
            "DSAttachmentWriteAndAllShadersRead",
            VK_IMAGE_LAYOUT_GENERAL,
            kAllDepthStencilPipelineStageFlags | kAllShadersPipelineStageFlags,
            kAllDepthStencilPipelineStageFlags | kAllShadersPipelineStageFlags,
            // Transition to: all reads and writes must happen after barrier.
            VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT,
            // Transition from: all writes must finish before barrier.
            VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
            ResourceAccess::Write,
            // In case of multiple destination stages, We barrier the earliest stage
            PipelineStage::VertexShader,
        },
    },
    {
        ImageLayout::DSAttachmentReadAndFragmentShaderRead,
            ImageMemoryBarrierData{
            "DSAttachmentReadAndFragmentShaderRead",
            VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | kAllDepthStencilPipelineStageFlags,
            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | kAllDepthStencilPipelineStageFlags,
            // Transition to: all reads must happen after barrier.
            VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
            // Transition from: RAR and WAR don't need memory barrier.
            0,
            ResourceAccess::ReadOnly,
            PipelineStage::EarlyFragmentTest,
        },
    },
    {
        ImageLayout::DSAttachmentReadAndAllShadersRead,
            ImageMemoryBarrierData{
            "DSAttachmentReadAndAllShadersRead",
            VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
            kAllShadersPipelineStageFlags | kAllDepthStencilPipelineStageFlags,
            kAllShadersPipelineStageFlags | kAllDepthStencilPipelineStageFlags,
            // Transition to: all reads must happen after barrier.
            VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
            // Transition from: RAR and WAR don't need memory barrier.
            0,
            ResourceAccess::ReadOnly,
            PipelineStage::VertexShader,
        },
    },
    {
        ImageLayout::DepthStencilAttachmentReadOnly,
            ImageMemoryBarrierData{
            "DepthStencilAttachmentReadOnly",
            VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
            kAllDepthStencilPipelineStageFlags,
            kAllDepthStencilPipelineStageFlags,
            // Transition to: all reads must happen after barrier.
            VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
            // Transition from: RAR and WAR don't need memory barrier.
            0,
            ResourceAccess::ReadOnly,
            PipelineStage::EarlyFragmentTest,
        },
    },
    {
        ImageLayout::DepthStencilAttachment,
        ImageMemoryBarrierData{
            "DepthStencilAttachment",
            VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
            kAllDepthStencilPipelineStageFlags,
            kAllDepthStencilPipelineStageFlags,
            // Transition to: all reads and writes must happen after barrier.
            VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
            // Transition from: all writes must finish before barrier.
            VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
            ResourceAccess::Write,
            PipelineStage::EarlyFragmentTest,
        },
    },
    {
        ImageLayout::DepthStencilResolveAttachment,
        ImageMemoryBarrierData{
            "DepthStencilResolveAttachment",
            VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
            // Note: depth/stencil resolve uses color output stage and mask!
            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
            // Transition to: all reads and writes must happen after barrier.
            VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
            // Transition from: all writes must finish before barrier.
            VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
            ResourceAccess::Write,
            PipelineStage::ColorAttachmentOutput,
        },
    },
    {
        ImageLayout::Present,
        ImageMemoryBarrierData{
            "Present",
            VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
            VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
            VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
            // Transition to: vkQueuePresentKHR automatically performs the appropriate memory barriers:
            //
            // > Any writes to memory backing the images referenced by the pImageIndices and
            // > pSwapchains members of pPresentInfo, that are available before vkQueuePresentKHR
            // > is executed, are automatically made visible to the read access performed by the
            // > presentation engine.
            0,
            // Transition from: RAR and WAR don't need memory barrier.
            0,
            ResourceAccess::ReadOnly,
            PipelineStage::BottomOfPipe,
        },
    },
    {
        ImageLayout::ExternalPreInitialized,
        ImageMemoryBarrierData{
            "ExternalPreInitialized",
            VK_IMAGE_LAYOUT_PREINITIALIZED,
            VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
            VK_PIPELINE_STAGE_HOST_BIT | VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
            // Transition to: we don't expect to transition into PreInitialized.
            0,
            // Transition from: all writes must finish before barrier.
            VK_ACCESS_MEMORY_WRITE_BIT,
            ResourceAccess::ReadOnly,
            PipelineStage::InvalidEnum,
        },
    },
    {
        ImageLayout::ExternalShadersReadOnly,
        ImageMemoryBarrierData{
            "ExternalShadersReadOnly",
            VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
            VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
            VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
            // Transition to: all reads must happen after barrier.
            VK_ACCESS_SHADER_READ_BIT,
            // Transition from: RAR and WAR don't need memory barrier.
            0,
            ResourceAccess::ReadOnly,
            // In case of multiple destination stages, We barrier the earliest stage
            PipelineStage::TopOfPipe,
        },
    },
    {
        ImageLayout::ExternalShadersWrite,
        ImageMemoryBarrierData{
            "ExternalShadersWrite",
            VK_IMAGE_LAYOUT_GENERAL,
            VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
            VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
            // Transition to: all reads and writes must happen after barrier.
            VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
            // Transition from: all writes must finish before barrier.
            VK_ACCESS_SHADER_WRITE_BIT,
            ResourceAccess::Write,
            // In case of multiple destination stages, We barrier the earliest stage
            PipelineStage::TopOfPipe,
        },
    },
    {
        ImageLayout::TransferSrc,
        ImageMemoryBarrierData{
            "TransferSrc",
            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
            VK_PIPELINE_STAGE_TRANSFER_BIT,
            VK_PIPELINE_STAGE_TRANSFER_BIT,
            // Transition to: all reads must happen after barrier.
            VK_ACCESS_TRANSFER_READ_BIT,
            // Transition from: RAR and WAR don't need memory barrier.
            0,
            ResourceAccess::ReadOnly,
            PipelineStage::Transfer,
        },
    },
    {
        ImageLayout::TransferDst,
        ImageMemoryBarrierData{
            "TransferDst",
            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
            VK_PIPELINE_STAGE_TRANSFER_BIT,
            VK_PIPELINE_STAGE_TRANSFER_BIT,
            // Transition to: all writes must happen after barrier.
            VK_ACCESS_TRANSFER_WRITE_BIT,
            // Transition from: all writes must finish before barrier.
            VK_ACCESS_TRANSFER_WRITE_BIT,
            ResourceAccess::Write,
            PipelineStage::Transfer,
        },
    },
    {
        ImageLayout::VertexShaderReadOnly,
        ImageMemoryBarrierData{
            "VertexShaderReadOnly",
            VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
            // Transition to: all reads must happen after barrier.
            VK_ACCESS_SHADER_READ_BIT,
            // Transition from: RAR and WAR don't need memory barrier.
            0,
            ResourceAccess::ReadOnly,
            PipelineStage::VertexShader,
        },
    },
    {
        ImageLayout::VertexShaderWrite,
        ImageMemoryBarrierData{
            "VertexShaderWrite",
            VK_IMAGE_LAYOUT_GENERAL,
            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
            // Transition to: all reads and writes must happen after barrier.
            VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
            // Transition from: all writes must finish before barrier.
            VK_ACCESS_SHADER_WRITE_BIT,
            ResourceAccess::Write,
            PipelineStage::VertexShader,
        },
    },
    {
        ImageLayout::PreFragmentShadersReadOnly,
        ImageMemoryBarrierData{
            "PreFragmentShadersReadOnly",
            VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
            kPreFragmentStageFlags,
            kPreFragmentStageFlags,
            // Transition to: all reads must happen after barrier.
            VK_ACCESS_SHADER_READ_BIT,
            // Transition from: RAR and WAR don't need memory barrier.
            0,
            ResourceAccess::ReadOnly,
            // In case of multiple destination stages, We barrier the earliest stage
            PipelineStage::VertexShader,
        },
    },
    {
        ImageLayout::PreFragmentShadersWrite,
        ImageMemoryBarrierData{
            "PreFragmentShadersWrite",
            VK_IMAGE_LAYOUT_GENERAL,
            kPreFragmentStageFlags,
            kPreFragmentStageFlags,
            // Transition to: all reads and writes must happen after barrier.
            VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
            // Transition from: all writes must finish before barrier.
            VK_ACCESS_SHADER_WRITE_BIT,
            ResourceAccess::Write,
            // In case of multiple destination stages, We barrier the earliest stage
            PipelineStage::VertexShader,
        },
    },
    {
        ImageLayout::FragmentShaderReadOnly,
        ImageMemoryBarrierData{
            "FragmentShaderReadOnly",
            VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
            // Transition to: all reads must happen after barrier.
            VK_ACCESS_SHADER_READ_BIT,
            // Transition from: RAR and WAR don't need memory barrier.
            0,
            ResourceAccess::ReadOnly,
            PipelineStage::FragmentShader,
        },
    },
    {
        ImageLayout::FragmentShaderWrite,
        ImageMemoryBarrierData{
            "FragmentShaderWrite",
            VK_IMAGE_LAYOUT_GENERAL,
            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
            // Transition to: all reads and writes must happen after barrier.
            VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
            // Transition from: all writes must finish before barrier.
            VK_ACCESS_SHADER_WRITE_BIT,
            ResourceAccess::Write,
            PipelineStage::FragmentShader,
        },
    },
    {
        ImageLayout::ComputeShaderReadOnly,
        ImageMemoryBarrierData{
            "ComputeShaderReadOnly",
            VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
            // Transition to: all reads must happen after barrier.
            VK_ACCESS_SHADER_READ_BIT,
            // Transition from: RAR and WAR don't need memory barrier.
            0,
            ResourceAccess::ReadOnly,
            PipelineStage::ComputeShader,
        },
    },
    {
        ImageLayout::ComputeShaderWrite,
        ImageMemoryBarrierData{
            "ComputeShaderWrite",
            VK_IMAGE_LAYOUT_GENERAL,
            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
            // Transition to: all reads and writes must happen after barrier.
            VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
            // Transition from: all writes must finish before barrier.
            VK_ACCESS_SHADER_WRITE_BIT,
            ResourceAccess::Write,
            PipelineStage::ComputeShader,
        },
    },
    {
        ImageLayout::AllGraphicsShadersReadOnly,
        ImageMemoryBarrierData{
            "AllGraphicsShadersReadOnly",
            VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
            kAllShadersPipelineStageFlags,
            kAllShadersPipelineStageFlags,
            // Transition to: all reads must happen after barrier.
            VK_ACCESS_SHADER_READ_BIT,
            // Transition from: RAR and WAR don't need memory barrier.
            0,
            ResourceAccess::ReadOnly,
            // In case of multiple destination stages, We barrier the earliest stage
            PipelineStage::VertexShader,
        },
    },
    {
        ImageLayout::AllGraphicsShadersWrite,
        ImageMemoryBarrierData{
            "AllGraphicsShadersWrite",
            VK_IMAGE_LAYOUT_GENERAL,
            kAllShadersPipelineStageFlags,
            kAllShadersPipelineStageFlags,
            // Transition to: all reads and writes must happen after barrier.
            VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
            // Transition from: all writes must finish before barrier.
            VK_ACCESS_SHADER_WRITE_BIT,
            ResourceAccess::Write,
            // In case of multiple destination stages, We barrier the earliest stage
            PipelineStage::VertexShader,
        },
    },
};
// clang-format on
538 
GetImageLayoutSrcStageMask(Context * context,const ImageMemoryBarrierData & transition)539 VkPipelineStageFlags GetImageLayoutSrcStageMask(Context *context,
540                                                 const ImageMemoryBarrierData &transition)
541 {
542     return transition.srcStageMask & context->getRenderer()->getSupportedVulkanPipelineStageMask();
543 }
544 
GetImageLayoutDstStageMask(Context * context,const ImageMemoryBarrierData & transition)545 VkPipelineStageFlags GetImageLayoutDstStageMask(Context *context,
546                                                 const ImageMemoryBarrierData &transition)
547 {
548     return transition.dstStageMask & context->getRenderer()->getSupportedVulkanPipelineStageMask();
549 }
550 
GetImageCreateFlags(gl::TextureType textureType)551 VkImageCreateFlags GetImageCreateFlags(gl::TextureType textureType)
552 {
553     switch (textureType)
554     {
555         case gl::TextureType::CubeMap:
556         case gl::TextureType::CubeMapArray:
557             return VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
558 
559         case gl::TextureType::_3D:
560             return VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT;
561 
562         default:
563             return 0;
564     }
565 }
566 
HandlePrimitiveRestart(ContextVk * contextVk,gl::DrawElementsType glIndexType,GLsizei indexCount,const uint8_t * srcPtr,uint8_t * outPtr)567 void HandlePrimitiveRestart(ContextVk *contextVk,
568                             gl::DrawElementsType glIndexType,
569                             GLsizei indexCount,
570                             const uint8_t *srcPtr,
571                             uint8_t *outPtr)
572 {
573     switch (glIndexType)
574     {
575         case gl::DrawElementsType::UnsignedByte:
576             if (contextVk->getFeatures().supportsIndexTypeUint8.enabled)
577             {
578                 CopyLineLoopIndicesWithRestart<uint8_t, uint8_t>(indexCount, srcPtr, outPtr);
579             }
580             else
581             {
582                 CopyLineLoopIndicesWithRestart<uint8_t, uint16_t>(indexCount, srcPtr, outPtr);
583             }
584             break;
585         case gl::DrawElementsType::UnsignedShort:
586             CopyLineLoopIndicesWithRestart<uint16_t, uint16_t>(indexCount, srcPtr, outPtr);
587             break;
588         case gl::DrawElementsType::UnsignedInt:
589             CopyLineLoopIndicesWithRestart<uint32_t, uint32_t>(indexCount, srcPtr, outPtr);
590             break;
591         default:
592             UNREACHABLE();
593     }
594 }
595 
HasBothDepthAndStencilAspects(VkImageAspectFlags aspectFlags)596 bool HasBothDepthAndStencilAspects(VkImageAspectFlags aspectFlags)
597 {
598     return IsMaskFlagSet(aspectFlags, kDepthStencilAspects);
599 }
600 
GetContentDefinedLayerRangeBits(uint32_t layerStart,uint32_t layerCount,uint32_t maxLayerCount)601 uint8_t GetContentDefinedLayerRangeBits(uint32_t layerStart,
602                                         uint32_t layerCount,
603                                         uint32_t maxLayerCount)
604 {
605     uint8_t layerRangeBits = layerCount >= maxLayerCount ? static_cast<uint8_t>(~0u)
606                                                          : angle::BitMask<uint8_t>(layerCount);
607     layerRangeBits <<= layerStart;
608 
609     return layerRangeBits;
610 }
611 
GetImageLayerCountForView(const ImageHelper & image)612 uint32_t GetImageLayerCountForView(const ImageHelper &image)
613 {
614     // Depth > 1 means this is a 3D texture and depth is our layer count
615     return image.getExtents().depth > 1 ? image.getExtents().depth : image.getLayerCount();
616 }
617 
ReleaseImageViews(ImageViewVector * imageViewVector,std::vector<GarbageObject> * garbage)618 void ReleaseImageViews(ImageViewVector *imageViewVector, std::vector<GarbageObject> *garbage)
619 {
620     for (ImageView &imageView : *imageViewVector)
621     {
622         if (imageView.valid())
623         {
624             garbage->emplace_back(GetGarbage(&imageView));
625         }
626     }
627     imageViewVector->clear();
628 }
629 
DestroyImageViews(ImageViewVector * imageViewVector,VkDevice device)630 void DestroyImageViews(ImageViewVector *imageViewVector, VkDevice device)
631 {
632     for (ImageView &imageView : *imageViewVector)
633     {
634         imageView.destroy(device);
635     }
636     imageViewVector->clear();
637 }
638 
GetLevelImageView(ImageViewVector * imageViews,LevelIndex levelVk,uint32_t levelCount)639 ImageView *GetLevelImageView(ImageViewVector *imageViews, LevelIndex levelVk, uint32_t levelCount)
640 {
641     // Lazily allocate the storage for image views. We allocate the full level count because we
642     // don't want to trigger any std::vector reallocations. Reallocations could invalidate our
643     // view pointers.
644     if (imageViews->empty())
645     {
646         imageViews->resize(levelCount);
647     }
648     ASSERT(imageViews->size() > levelVk.get());
649 
650     return &(*imageViews)[levelVk.get()];
651 }
652 
GetLevelLayerImageView(LayerLevelImageViewVector * imageViews,LevelIndex levelVk,uint32_t layer,uint32_t levelCount,uint32_t layerCount)653 ImageView *GetLevelLayerImageView(LayerLevelImageViewVector *imageViews,
654                                   LevelIndex levelVk,
655                                   uint32_t layer,
656                                   uint32_t levelCount,
657                                   uint32_t layerCount)
658 {
659     // Lazily allocate the storage for image views. We allocate the full layer count because we
660     // don't want to trigger any std::vector reallocations. Reallocations could invalidate our
661     // view pointers.
662     if (imageViews->empty())
663     {
664         imageViews->resize(layerCount);
665     }
666     ASSERT(imageViews->size() > layer);
667 
668     return GetLevelImageView(&(*imageViews)[layer], levelVk, levelCount);
669 }
670 
671 // Special rules apply to VkBufferImageCopy with depth/stencil. The components are tightly packed
672 // into a depth or stencil section of the destination buffer. See the spec:
673 // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/VkBufferImageCopy.html
const angle::Format &GetDepthStencilImageToBufferFormat(const angle::Format &imageFormat,
                                                        VkImageAspectFlagBits copyAspect)
{
    // Stencil copies always produce tightly packed S8 data.
    if (copyAspect == VK_IMAGE_ASPECT_STENCIL_BIT)
    {
        ASSERT(imageFormat.id == angle::FormatID::D24_UNORM_S8_UINT ||
               imageFormat.id == angle::FormatID::D32_FLOAT_S8X24_UINT ||
               imageFormat.id == angle::FormatID::S8_UINT);
        return angle::Format::Get(angle::FormatID::S8_UINT);
    }

    ASSERT(copyAspect == VK_IMAGE_ASPECT_DEPTH_BIT);

    // Depth copies of combined depth/stencil formats pack only the depth component; depth-only
    // formats map to themselves.
    switch (imageFormat.id)
    {
        case angle::FormatID::D16_UNORM:
        case angle::FormatID::D24_UNORM_X8_UINT:
        case angle::FormatID::D32_FLOAT:
            return imageFormat;
        case angle::FormatID::D24_UNORM_S8_UINT:
            return angle::Format::Get(angle::FormatID::D24_UNORM_X8_UINT);
        case angle::FormatID::D32_FLOAT_S8X24_UINT:
            return angle::Format::Get(angle::FormatID::D32_FLOAT);
        default:
            UNREACHABLE();
            return imageFormat;
    }
}
704 
GetRobustResourceClearValue(const Format & format)705 VkClearValue GetRobustResourceClearValue(const Format &format)
706 {
707     VkClearValue clearValue = {};
708     if (format.intendedFormat().hasDepthOrStencilBits())
709     {
710         clearValue.depthStencil = kRobustInitDepthStencilValue;
711     }
712     else
713     {
714         clearValue.color =
715             format.hasEmulatedImageChannels() ? kEmulatedInitColorValue : kRobustInitColorValue;
716     }
717     return clearValue;
718 }
719 
720 #if !defined(ANGLE_PLATFORM_MACOS) && !defined(ANGLE_PLATFORM_ANDROID)
IsExternalQueueFamily(uint32_t queueFamilyIndex)721 bool IsExternalQueueFamily(uint32_t queueFamilyIndex)
722 {
723     return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
724            queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT;
725 }
726 #endif
727 
IsShaderReadOnlyLayout(const ImageMemoryBarrierData & imageLayout)728 bool IsShaderReadOnlyLayout(const ImageMemoryBarrierData &imageLayout)
729 {
730     return imageLayout.layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
731 }
732 
IsAnySubresourceContentDefined(const gl::TexLevelArray<angle::BitSet8<8>> & contentDefined)733 bool IsAnySubresourceContentDefined(const gl::TexLevelArray<angle::BitSet8<8>> &contentDefined)
734 {
735     for (const angle::BitSet8<8> &levelContentDefined : contentDefined)
736     {
737         if (levelContentDefined.any())
738         {
739             return true;
740         }
741     }
742     return false;
743 }
744 
ExtendRenderPassInvalidateArea(const gl::Rectangle & invalidateArea,gl::Rectangle * out)745 void ExtendRenderPassInvalidateArea(const gl::Rectangle &invalidateArea, gl::Rectangle *out)
746 {
747     if (out->empty())
748     {
749         *out = invalidateArea;
750     }
751     else
752     {
753         gl::ExtendRectangle(*out, invalidateArea, out);
754     }
755 }
756 
// Whether a CopyImage operation between these two formats can be implemented with a Vulkan
// transfer (vkCmdCopyImage).
bool CanCopyWithTransferForCopyImage(RendererVk *renderer,
                                     const Format &srcFormat,
                                     VkImageTiling srcTilingMode,
                                     const Format &destFormat,
                                     VkImageTiling destTilingMode)
{
    // Neither source nor destination formats can be emulated for copy image through transfer,
    // unless they are emulated with the same format!
    bool isFormatCompatible =
        (!srcFormat.hasEmulatedImageFormat() && !destFormat.hasEmulatedImageFormat()) ||
        srcFormat.actualImageFormatID == destFormat.actualImageFormatID;

    // If neither format is emulated, GL validation ensures that pixelBytes is the same for both.
    ASSERT(!isFormatCompatible ||
           srcFormat.actualImageFormat().pixelBytes == destFormat.actualImageFormat().pixelBytes);

    // Additionally, both formats must support the transfer feature bits for their tiling mode.
    return isFormatCompatible &&
           CanCopyWithTransfer(renderer, srcFormat, srcTilingMode, destFormat, destTilingMode);
}
776 
CanCopyWithTransformForReadPixels(const PackPixelsParams & packPixelsParams,const vk::Format * imageFormat,const angle::Format * readFormat)777 bool CanCopyWithTransformForReadPixels(const PackPixelsParams &packPixelsParams,
778                                        const vk::Format *imageFormat,
779                                        const angle::Format *readFormat)
780 {
781     // Don't allow copies from emulated formats for simplicity.
782     const bool isEmulatedFormat = imageFormat->hasEmulatedImageFormat();
783 
784     // Only allow copies to PBOs with identical format.
785     const bool isSameFormatCopy = *readFormat == *packPixelsParams.destFormat;
786 
787     // Disallow any transformation.
788     const bool needsTransformation =
789         packPixelsParams.rotation != SurfaceRotation::Identity || packPixelsParams.reverseRowOrder;
790 
791     // Disallow copies when the output pitch cannot be correctly specified in Vulkan.
792     const bool isPitchMultipleOfTexelSize =
793         packPixelsParams.outputPitch % readFormat->pixelBytes == 0;
794 
795     return !isEmulatedFormat && isSameFormatCopy && !needsTransformation &&
796            isPitchMultipleOfTexelSize;
797 }
798 
ReleaseBufferListToRenderer(RendererVk * renderer,BufferHelperPointerVector * buffers)799 void ReleaseBufferListToRenderer(RendererVk *renderer, BufferHelperPointerVector *buffers)
800 {
801     for (std::unique_ptr<BufferHelper> &toFree : *buffers)
802     {
803         toFree->release(renderer);
804     }
805     buffers->clear();
806 }
807 
DestroyBufferList(RendererVk * renderer,BufferHelperPointerVector * buffers)808 void DestroyBufferList(RendererVk *renderer, BufferHelperPointerVector *buffers)
809 {
810     for (std::unique_ptr<BufferHelper> &toDestroy : *buffers)
811     {
812         toDestroy->destroy(renderer);
813     }
814     buffers->clear();
815 }
816 
ShouldReleaseFreeBuffer(const vk::BufferHelper & buffer,size_t dynamicBufferSize,DynamicBufferPolicy policy,size_t freeListSize)817 bool ShouldReleaseFreeBuffer(const vk::BufferHelper &buffer,
818                              size_t dynamicBufferSize,
819                              DynamicBufferPolicy policy,
820                              size_t freeListSize)
821 {
822     constexpr size_t kLimitedFreeListMaxSize = 1;
823 
824     // If the dynamic buffer was resized we cannot reuse the retained buffer.  Additionally,
825     // only reuse the buffer if specifically requested.
826     const bool sizeMismatch    = buffer.getSize() != dynamicBufferSize;
827     const bool releaseByPolicy = policy == DynamicBufferPolicy::OneShotUse ||
828                                  (policy == DynamicBufferPolicy::SporadicTextureUpload &&
829                                   freeListSize >= kLimitedFreeListMaxSize);
830 
831     return sizeMismatch || releaseByPolicy;
832 }
833 }  // anonymous namespace
834 
// This is an arbitrary max. We can change this later if necessary.
uint32_t DynamicDescriptorPool::mMaxSetsPerPool           = 16;
// Presumably the factor by which the max set count grows when a pool fills up (named
// "Multiplier") — confirm against DynamicDescriptorPool's allocation code.
uint32_t DynamicDescriptorPool::mMaxSetsPerPoolMultiplier = 2;
838 
ConvertImageLayoutToVkImageLayout(ImageLayout imageLayout)839 VkImageLayout ConvertImageLayoutToVkImageLayout(ImageLayout imageLayout)
840 {
841     return kImageMemoryBarrierData[imageLayout].layout;
842 }
843 
FormatHasNecessaryFeature(RendererVk * renderer,angle::FormatID formatID,VkImageTiling tilingMode,VkFormatFeatureFlags featureBits)844 bool FormatHasNecessaryFeature(RendererVk *renderer,
845                                angle::FormatID formatID,
846                                VkImageTiling tilingMode,
847                                VkFormatFeatureFlags featureBits)
848 {
849     return (tilingMode == VK_IMAGE_TILING_OPTIMAL)
850                ? renderer->hasImageFormatFeatureBits(formatID, featureBits)
851                : renderer->hasLinearImageFormatFeatureBits(formatID, featureBits);
852 }
853 
CanCopyWithTransfer(RendererVk * renderer,const Format & srcFormat,VkImageTiling srcTilingMode,const Format & destFormat,VkImageTiling destTilingMode)854 bool CanCopyWithTransfer(RendererVk *renderer,
855                          const Format &srcFormat,
856                          VkImageTiling srcTilingMode,
857                          const Format &destFormat,
858                          VkImageTiling destTilingMode)
859 {
860     // Checks that the formats in the copy transfer have the appropriate tiling and transfer bits
861     bool isTilingCompatible           = srcTilingMode == destTilingMode;
862     bool srcFormatHasNecessaryFeature = FormatHasNecessaryFeature(
863         renderer, srcFormat.actualImageFormatID, srcTilingMode, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT);
864     bool dstFormatHasNecessaryFeature =
865         FormatHasNecessaryFeature(renderer, destFormat.actualImageFormatID, destTilingMode,
866                                   VK_FORMAT_FEATURE_TRANSFER_DST_BIT);
867 
868     return isTilingCompatible && srcFormatHasNecessaryFeature && dstFormatHasNecessaryFeature;
869 }
870 
// PackedClearValuesArray implementation
// The default constructor zero-initializes the packed clear-value storage; copy/move/destroy
// are all trivial member-wise operations.
PackedClearValuesArray::PackedClearValuesArray() : mValues{} {}
PackedClearValuesArray::~PackedClearValuesArray() = default;

PackedClearValuesArray::PackedClearValuesArray(const PackedClearValuesArray &other) = default;
PackedClearValuesArray &PackedClearValuesArray::operator=(const PackedClearValuesArray &rhs) =
    default;
878 
store(PackedAttachmentIndex index,VkImageAspectFlags aspectFlags,const VkClearValue & clearValue)879 void PackedClearValuesArray::store(PackedAttachmentIndex index,
880                                    VkImageAspectFlags aspectFlags,
881                                    const VkClearValue &clearValue)
882 {
883     ASSERT(aspectFlags != 0);
884     if (aspectFlags != VK_IMAGE_ASPECT_STENCIL_BIT)
885     {
886         storeNoDepthStencil(index, clearValue);
887     }
888 }
889 
// Unconditionally records the clear value in the slot for this packed attachment index.
void PackedClearValuesArray::storeNoDepthStencil(PackedAttachmentIndex index,
                                                 const VkClearValue &clearValue)
{
    mValues[index.get()] = clearValue;
}
895 
// CommandBufferHelper implementation.
// Initializes all tracking state to "unused"; reset() restores the render-pass-related subset
// of these values between uses.
CommandBufferHelper::CommandBufferHelper()
    : mPipelineBarriers(),
      mPipelineBarrierMask(),
      mCounter(0),
      mClearValues{},
      mRenderPassStarted(false),
      mTransformFeedbackCounterBuffers{},
      mValidTransformFeedbackBufferCount(0),
      mRebindTransformFeedbackBuffers(false),
      mIsTransformFeedbackActiveUnpaused(false),
      mIsRenderPassCommandBuffer(false),
      mHasShaderStorageOutput(false),
      mHasGLMemoryBarrierIssued(false),
      mDepthAccess(ResourceAccess::Unused),
      mStencilAccess(ResourceAccess::Unused),
      // kInfiniteCmdSize means "never invalidated / never disabled".
      mDepthCmdSizeInvalidated(kInfiniteCmdSize),
      mDepthCmdSizeDisabled(kInfiniteCmdSize),
      mStencilCmdSizeInvalidated(kInfiniteCmdSize),
      mStencilCmdSizeDisabled(kInfiniteCmdSize),
      mDepthStencilAttachmentIndex(kAttachmentIndexInvalid),
      mDepthStencilImage(nullptr),
      mDepthStencilResolveImage(nullptr),
      mDepthStencilLevelIndex(0),
      mDepthStencilLayerIndex(0),
      mDepthStencilLayerCount(0),
      mColorImagesCount(0),
      mImageOptimizeForPresent(nullptr)
{}
925 
CommandBufferHelper::~CommandBufferHelper()
{
    // Detach the framebuffer handle before destruction — the handle is presumably owned
    // elsewhere and must not be acted on by this wrapper's destructor (confirm against
    // Framebuffer's ownership rules).
    mFramebuffer.setHandle(VK_NULL_HANDLE);
}
930 
// One-time setup: pre-sizes the used-buffer tracking, sets up the pool allocator backing the
// command buffer, and records whether this helper records render-pass commands.
void CommandBufferHelper::initialize(bool isRenderPassCommandBuffer)
{
    ASSERT(mUsedBuffers.empty());
    // Reserve tracking capacity up front to avoid growth during command recording.
    constexpr size_t kInitialBufferCount = 128;
    mUsedBuffers.ensureCapacity(kInitialBufferCount);

    mAllocator.initialize(kDefaultPoolAllocatorPageSize, 1);
    // Push a scope into the pool allocator so we can easily free and re-init on reset()
    mAllocator.push();
    mCommandBuffer.initialize(&mAllocator);
    mIsRenderPassCommandBuffer = isRenderPassCommandBuffer;
}
943 
// Returns the helper to its just-initialized state so it can record a new set of commands:
// frees all pool-allocated command memory and clears the per-render-pass tracking state.
void CommandBufferHelper::reset()
{
    // Pop-then-push frees everything allocated since initialize() while keeping the allocator
    // ready for the next recording (see the matching push() in initialize()).
    mAllocator.pop();
    mAllocator.push();
    mCommandBuffer.reset();
    mUsedBuffers.clear();

    if (mIsRenderPassCommandBuffer)
    {
        mRenderPassStarted                 = false;
        mValidTransformFeedbackBufferCount = 0;
        mRebindTransformFeedbackBuffers    = false;
        mHasShaderStorageOutput            = false;
        mHasGLMemoryBarrierIssued          = false;
        mDepthAccess                       = ResourceAccess::Unused;
        mStencilAccess                     = ResourceAccess::Unused;
        mDepthCmdSizeInvalidated           = kInfiniteCmdSize;
        mDepthCmdSizeDisabled              = kInfiniteCmdSize;
        mStencilCmdSizeInvalidated         = kInfiniteCmdSize;
        mStencilCmdSizeDisabled            = kInfiniteCmdSize;
        mColorImagesCount                  = PackedAttachmentCount(0);
        mDepthStencilAttachmentIndex       = kAttachmentIndexInvalid;
        mDepthInvalidateArea               = gl::Rectangle();
        mStencilInvalidateArea             = gl::Rectangle();
        mRenderPassUsedImages.clear();
        mDepthStencilImage        = nullptr;
        mDepthStencilResolveImage = nullptr;
        mColorImages.reset();
        mColorResolveImages.reset();
        mImageOptimizeForPresent = nullptr;
    }
    // This state should never change for non-renderPass command buffer
    ASSERT(mRenderPassStarted == false);
    ASSERT(mValidTransformFeedbackBufferCount == 0);
    ASSERT(!mRebindTransformFeedbackBuffers);
    ASSERT(!mIsTransformFeedbackActiveUnpaused);
    ASSERT(mRenderPassUsedImages.empty());
}
982 
usesBuffer(const BufferHelper & buffer) const983 bool CommandBufferHelper::usesBuffer(const BufferHelper &buffer) const
984 {
985     return mUsedBuffers.contains(buffer.getBufferSerial().getValue());
986 }
987 
usesBufferForWrite(const BufferHelper & buffer) const988 bool CommandBufferHelper::usesBufferForWrite(const BufferHelper &buffer) const
989 {
990     BufferAccess access;
991     if (!mUsedBuffers.get(buffer.getBufferSerial().getValue(), &access))
992     {
993         return false;
994     }
995     return access == BufferAccess::Write;
996 }
997 
// Tracks a read of |buffer| at |readStage|: retains the buffer for the current submission,
// accumulates any required barrier into the per-stage pipeline barrier, and records the
// buffer as read-accessed.
void CommandBufferHelper::bufferRead(ContextVk *contextVk,
                                     VkAccessFlags readAccessType,
                                     PipelineStage readStage,
                                     BufferHelper *buffer)
{
    // Keep the buffer alive until the commands recorded here have executed.
    buffer->retain(&contextVk->getResourceUseList());
    VkPipelineStageFlagBits stageBits = kPipelineStageFlagBitMap[readStage];
    // recordReadBarrier returns whether it added anything to the barrier; only then does the
    // stage need to be flagged for execution in executeBarriers().
    if (buffer->recordReadBarrier(readAccessType, stageBits, &mPipelineBarriers[readStage]))
    {
        mPipelineBarrierMask.set(readStage);
    }

    // A buffer already tracked as written must not also be recorded as read here.
    ASSERT(!usesBufferForWrite(*buffer));
    if (!mUsedBuffers.contains(buffer->getBufferSerial().getValue()))
    {
        mUsedBuffers.insert(buffer->getBufferSerial().getValue(), BufferAccess::Read);
    }
}
1016 
// Tracks a write to |buffer| at |writeStage|: retains the buffer, accumulates the required
// barrier, records the access (unless aliasing is allowed), and flags host-visible writes.
void CommandBufferHelper::bufferWrite(ContextVk *contextVk,
                                      VkAccessFlags writeAccessType,
                                      PipelineStage writeStage,
                                      AliasingMode aliasingMode,
                                      BufferHelper *buffer)
{
    buffer->retain(&contextVk->getResourceUseList());
    VkPipelineStageFlagBits stageBits = kPipelineStageFlagBitMap[writeStage];
    // Writes always need a barrier; flag the stage if one was actually recorded.
    if (buffer->recordWriteBarrier(writeAccessType, stageBits, &mPipelineBarriers[writeStage]))
    {
        mPipelineBarrierMask.set(writeStage);
    }

    // Storage buffers are special. They can alias one another in a shader.
    // We support aliasing by not tracking storage buffers. This works well with the GL API
    // because storage buffers are required to be externally synchronized.
    // Compute / XFB emulation buffers are not allowed to alias.
    if (aliasingMode == AliasingMode::Disallowed)
    {
        ASSERT(!usesBuffer(*buffer));
        mUsedBuffers.insert(buffer->getBufferSerial().getValue(), BufferAccess::Write);
    }

    // Make sure host-visible buffer writes result in a barrier inserted at the end of the frame to
    // make the results visible to the host.  The buffer may be mapped by the application in the
    // future.
    if (buffer->isHostVisible())
    {
        contextVk->onHostVisibleBufferWrite();
    }
}
1048 
// Tracks a read of |image| in |imageLayout|: retains the image, transitions its layout (with
// barrier) if necessary, and registers it with the current render pass.
void CommandBufferHelper::imageRead(ContextVk *contextVk,
                                    VkImageAspectFlags aspectFlags,
                                    ImageLayout imageLayout,
                                    ImageHelper *image)
{
    image->retain(&contextVk->getResourceUseList());

    // Skip the transition when the image is already readable in this layout.
    if (image->isReadBarrierNecessary(imageLayout))
    {
        updateImageLayoutAndBarrier(contextVk, image, aspectFlags, imageLayout);
    }

    if (mIsRenderPassCommandBuffer)
    {
        // As noted in the header we don't support multiple read layouts for Images.
        // We allow duplicate uses in the RP to accommodate for normal GL sampler usage.
        if (!usesImageInRenderPass(*image))
        {
            mRenderPassUsedImages.insert(image->getImageSerial().getValue());
        }
    }
}
1071 
// Tracks a write to a subresource range of |image|: retains the image, marks the written
// level/layers, performs the layout transition, and registers the image with the render pass.
void CommandBufferHelper::imageWrite(ContextVk *contextVk,
                                     gl::LevelIndex level,
                                     uint32_t layerStart,
                                     uint32_t layerCount,
                                     VkImageAspectFlags aspectFlags,
                                     ImageLayout imageLayout,
                                     AliasingMode aliasingMode,
                                     ImageHelper *image)
{
    image->retain(&contextVk->getResourceUseList());
    // Mark one level starting at |level| and the given layer range as written.
    image->onWrite(level, 1, layerStart, layerCount, aspectFlags);
    // Write always requires a barrier
    updateImageLayoutAndBarrier(contextVk, image, aspectFlags, imageLayout);

    if (mIsRenderPassCommandBuffer)
    {
        // When used as a storage image we allow for aliased writes.
        if (aliasingMode == AliasingMode::Disallowed)
        {
            ASSERT(!usesImageInRenderPass(*image));
        }
        if (!usesImageInRenderPass(*image))
        {
            mRenderPassUsedImages.insert(image->getImageSerial().getValue());
        }
    }
}
1099 
// Registers a color attachment (and its optional resolve attachment) with the render pass:
// retains the images, tracks them as used, and stores them so finalizeColorImageLayout() can
// handle their layout transitions when the render pass ends.
void CommandBufferHelper::colorImagesDraw(ResourceUseList *resourceUseList,
                                          ImageHelper *image,
                                          ImageHelper *resolveImage,
                                          PackedAttachmentIndex packedAttachmentIndex)
{
    ASSERT(mIsRenderPassCommandBuffer);
    ASSERT(packedAttachmentIndex < mColorImagesCount);

    image->retain(resourceUseList);
    if (!usesImageInRenderPass(*image))
    {
        // This is possible due to different layers of the same texture being attached to different
        // attachments
        mRenderPassUsedImages.insert(image->getImageSerial().getValue());
    }
    // Each packed attachment slot is set exactly once per render pass.
    ASSERT(mColorImages[packedAttachmentIndex] == nullptr);
    mColorImages[packedAttachmentIndex] = image;
    image->setRenderPassUsageFlag(RenderPassUsage::RenderTargetAttachment);

    if (resolveImage)
    {
        resolveImage->retain(resourceUseList);
        if (!usesImageInRenderPass(*resolveImage))
        {
            mRenderPassUsedImages.insert(resolveImage->getImageSerial().getValue());
        }
        ASSERT(mColorResolveImages[packedAttachmentIndex] == nullptr);
        mColorResolveImages[packedAttachmentIndex] = resolveImage;
        resolveImage->setRenderPassUsageFlag(RenderPassUsage::RenderTargetAttachment);
    }
}
1131 
// Registers the depth/stencil attachment (and its optional resolve attachment) with the
// render pass and remembers the attached subresource range for later restore/finalize steps.
void CommandBufferHelper::depthStencilImagesDraw(ResourceUseList *resourceUseList,
                                                 gl::LevelIndex level,
                                                 uint32_t layerStart,
                                                 uint32_t layerCount,
                                                 ImageHelper *image,
                                                 ImageHelper *resolveImage)
{
    ASSERT(mIsRenderPassCommandBuffer);
    ASSERT(!usesImageInRenderPass(*image));
    ASSERT(!resolveImage || !usesImageInRenderPass(*resolveImage));

    // Because depthStencil buffer's read/write property can change while we build renderpass, we
    // defer the image layout changes until endRenderPass time or when images going away so that we
    // only insert layout change barrier once.
    image->retain(resourceUseList);
    mRenderPassUsedImages.insert(image->getImageSerial().getValue());
    mDepthStencilImage      = image;
    mDepthStencilLevelIndex = level;
    mDepthStencilLayerIndex = layerStart;
    mDepthStencilLayerCount = layerCount;
    image->setRenderPassUsageFlag(RenderPassUsage::RenderTargetAttachment);

    if (resolveImage)
    {
        // Note that the resolve depth/stencil image has the same level/layer index as the
        // depth/stencil image as currently it can only ever come from
        // multisampled-render-to-texture renderbuffers.
        resolveImage->retain(resourceUseList);
        mRenderPassUsedImages.insert(resolveImage->getImageSerial().getValue());
        mDepthStencilResolveImage = resolveImage;
        resolveImage->setRenderPassUsageFlag(RenderPassUsage::RenderTargetAttachment);
    }
}
1165 
// Notes a depth-buffer access by the current render pass; drives loadOp/storeOp optimization
// and un-invalidates the attachment when it is written after an invalidate.
void CommandBufferHelper::onDepthAccess(ResourceAccess access)
{
    // Update the access for optimizing this render pass's loadOp
    UpdateAccess(&mDepthAccess, access);

    // Update the invalidate state for optimizing this render pass's storeOp
    if (onDepthStencilAccess(access, &mDepthCmdSizeInvalidated, &mDepthCmdSizeDisabled))
    {
        // The attachment is no longer invalid, so restore its content.
        restoreDepthContent();
    }
}
1178 
// Notes a stencil-buffer access by the current render pass; mirrors onDepthAccess() for the
// stencil aspect.
void CommandBufferHelper::onStencilAccess(ResourceAccess access)
{
    // Update the access for optimizing this render pass's loadOp
    UpdateAccess(&mStencilAccess, access);

    // Update the invalidate state for optimizing this render pass's stencilStoreOp
    if (onDepthStencilAccess(access, &mStencilCmdSizeInvalidated, &mStencilCmdSizeDisabled))
    {
        // The attachment is no longer invalid, so restore its content.
        restoreStencilContent();
    }
}
1191 
onDepthStencilAccess(ResourceAccess access,uint32_t * cmdCountInvalidated,uint32_t * cmdCountDisabled)1192 bool CommandBufferHelper::onDepthStencilAccess(ResourceAccess access,
1193                                                uint32_t *cmdCountInvalidated,
1194                                                uint32_t *cmdCountDisabled)
1195 {
1196     if (*cmdCountInvalidated == kInfiniteCmdSize)
1197     {
1198         // If never invalidated or no longer invalidated, return early.
1199         return false;
1200     }
1201     if (access == ResourceAccess::Write)
1202     {
1203         // Drawing to this attachment is being enabled.  Assume that drawing will immediately occur
1204         // after this attachment is enabled, and that means that the attachment will no longer be
1205         // invalidated.
1206         *cmdCountInvalidated = kInfiniteCmdSize;
1207         *cmdCountDisabled    = kInfiniteCmdSize;
1208         // Return true to indicate that the store op should remain STORE and that mContentDefined
1209         // should be set to true;
1210         return true;
1211     }
1212     else
1213     {
1214         // Drawing to this attachment is being disabled.
1215         if (hasWriteAfterInvalidate(*cmdCountInvalidated, *cmdCountDisabled))
1216         {
1217             // The attachment was previously drawn while enabled, and so is no longer invalidated.
1218             *cmdCountInvalidated = kInfiniteCmdSize;
1219             *cmdCountDisabled    = kInfiniteCmdSize;
1220             // Return true to indicate that the store op should remain STORE and that
1221             // mContentDefined should be set to true;
1222             return true;
1223         }
1224         else
1225         {
1226             // Get the latest CmdSize at the start of being disabled.  At the end of the render
1227             // pass, cmdCountDisabled is <= the actual command buffer size, and so it's compared
1228             // with cmdCountInvalidated.  If the same, the attachment is still invalidated.
1229             *cmdCountDisabled = mCommandBuffer.getCommandSize();
1230             return false;
1231         }
1232     }
1233 }
1234 
updateStartedRenderPassWithDepthMode(bool readOnlyDepthStencilMode)1235 void CommandBufferHelper::updateStartedRenderPassWithDepthMode(bool readOnlyDepthStencilMode)
1236 {
1237     ASSERT(mIsRenderPassCommandBuffer);
1238     ASSERT(mRenderPassStarted);
1239 
1240     if (mDepthStencilImage)
1241     {
1242         if (readOnlyDepthStencilMode)
1243         {
1244             mDepthStencilImage->setRenderPassUsageFlag(RenderPassUsage::ReadOnlyAttachment);
1245         }
1246         else
1247         {
1248             mDepthStencilImage->clearRenderPassUsageFlag(RenderPassUsage::ReadOnlyAttachment);
1249         }
1250     }
1251 
1252     if (mDepthStencilResolveImage)
1253     {
1254         if (readOnlyDepthStencilMode)
1255         {
1256             mDepthStencilResolveImage->setRenderPassUsageFlag(RenderPassUsage::ReadOnlyAttachment);
1257         }
1258         else
1259         {
1260             mDepthStencilResolveImage->clearRenderPassUsageFlag(
1261                 RenderPassUsage::ReadOnlyAttachment);
1262         }
1263     }
1264 }
1265 
// Marks the depth attachment's subresource content as defined again (undoing a previous
// invalidate) and clears the pending depth invalidate area.
void CommandBufferHelper::restoreDepthContent()
{
    // Note that the image may have been deleted since the render pass has started.
    if (mDepthStencilImage)
    {
        ASSERT(mDepthStencilImage->valid());
        mDepthStencilImage->restoreSubresourceContent(
            mDepthStencilLevelIndex, mDepthStencilLayerIndex, mDepthStencilLayerCount);
        mDepthInvalidateArea = gl::Rectangle();
    }
}
1277 
// Marks the stencil attachment's subresource content as defined again (undoing a previous
// invalidate) and clears the pending stencil invalidate area.
void CommandBufferHelper::restoreStencilContent()
{
    // Note that the image may have been deleted since the render pass has started.
    if (mDepthStencilImage)
    {
        ASSERT(mDepthStencilImage->valid());
        mDepthStencilImage->restoreSubresourceStencilContent(
            mDepthStencilLevelIndex, mDepthStencilLayerIndex, mDepthStencilLayerCount);
        mStencilInvalidateArea = gl::Rectangle();
    }
}
1289 
// Flushes all pipeline barriers accumulated during recording into |primary|, either merged
// into a single vkCmdPipelineBarrier call or one call per stage, then clears the mask.
void CommandBufferHelper::executeBarriers(const angle::FeaturesVk &features,
                                          PrimaryCommandBuffer *primary)
{
    // make a local copy for faster access
    PipelineStagesMask mask = mPipelineBarrierMask;
    if (mask.none())
    {
        return;
    }

    if (features.preferAggregateBarrierCalls.enabled)
    {
        // Fold every per-stage barrier into the first one and issue a single call; some
        // drivers handle one aggregate barrier better than many small ones.
        PipelineStagesMask::Iterator iter = mask.begin();
        PipelineBarrier &barrier          = mPipelineBarriers[*iter];
        for (++iter; iter != mask.end(); ++iter)
        {
            barrier.merge(&mPipelineBarriers[*iter]);
        }
        barrier.execute(primary);
    }
    else
    {
        // Issue each stage's barrier separately.
        for (PipelineStage pipelineStage : mask)
        {
            PipelineBarrier &barrier = mPipelineBarriers[pipelineStage];
            barrier.execute(primary);
        }
    }
    mPipelineBarrierMask.reset();
}
1320 
// Transitions |image| to |imageLayout|, accumulating the resulting memory barrier into the
// per-stage pipeline barrier selected by the layout's barrier index.
void CommandBufferHelper::updateImageLayoutAndBarrier(Context *context,
                                                      ImageHelper *image,
                                                      VkImageAspectFlags aspectFlags,
                                                      ImageLayout imageLayout)
{
    PipelineStage barrierIndex = kImageMemoryBarrierData[imageLayout].barrierIndex;
    ASSERT(barrierIndex != PipelineStage::InvalidEnum);
    PipelineBarrier *barrier = &mPipelineBarriers[barrierIndex];
    // Only flag the stage when a barrier was actually recorded.
    if (image->updateLayoutAndBarrier(context, aspectFlags, imageLayout, barrier))
    {
        mPipelineBarrierMask.set(barrierIndex);
    }
}
1334 
// At render pass end, settles the layout of a color attachment (or its resolve image):
// records the attachment op layouts and, for the swapchain image flagged for present
// optimization, sets the render pass finalLayout to PRESENT instead of adding a barrier.
void CommandBufferHelper::finalizeColorImageLayout(Context *context,
                                                   ImageHelper *image,
                                                   PackedAttachmentIndex packedAttachmentIndex,
                                                   bool isResolveImage)
{
    ASSERT(mIsRenderPassCommandBuffer);
    ASSERT(packedAttachmentIndex < mColorImagesCount);
    ASSERT(image != nullptr);

    // Do layout change.
    ImageLayout imageLayout;
    if (image->usedByCurrentRenderPassAsAttachmentAndSampler())
    {
        // texture code already picked layout and inserted barrier
        imageLayout = image->getCurrentImageLayout();
        ASSERT(imageLayout == ImageLayout::ColorAttachmentAndFragmentShaderRead ||
               imageLayout == ImageLayout::ColorAttachmentAndAllShadersRead);
    }
    else
    {
        imageLayout = ImageLayout::ColorAttachment;
        updateImageLayoutAndBarrier(context, image, VK_IMAGE_ASPECT_COLOR_BIT, imageLayout);
    }

    // Resolve attachments don't have ops recorded in mAttachmentOps.
    if (!isResolveImage)
    {
        mAttachmentOps.setLayouts(packedAttachmentIndex, imageLayout, imageLayout);
    }

    if (mImageOptimizeForPresent == image)
    {
        ASSERT(packedAttachmentIndex == kAttachmentIndexZero);
        // Use finalLayout instead of extra barrier for layout change to present
        mImageOptimizeForPresent->setCurrentImageLayout(vk::ImageLayout::Present);
        // TODO(syoussefi):  We currently don't store the layout of the resolve attachments, so once
        // multisampled backbuffers are optimized to use resolve attachments, this information needs
        // to be stored somewhere.  http://anglebug.com/4836
        SetBitField(mAttachmentOps[packedAttachmentIndex].finalLayout,
                    mImageOptimizeForPresent->getCurrentImageLayout());
        mImageOptimizeForPresent = nullptr;
    }

    image->resetRenderPassUsageFlags();
}
1379 
// Finalizes the depth/stencil attachment's layout at the end of the render pass.  Chooses between
// read-only and writable layouts (with or without concurrent in-shader sampling), decides whether
// a barrier is actually needed, records the layouts in the attachment ops, and emits the barrier.
void CommandBufferHelper::finalizeDepthStencilImageLayout(Context *context)
{
    ASSERT(mIsRenderPassCommandBuffer);
    ASSERT(mDepthStencilImage);

    // Do depth stencil layout change.
    ImageLayout imageLayout;
    bool barrierRequired;

    if (mDepthStencilImage->usedByCurrentRenderPassAsAttachmentAndSampler())
    {
        // The texture code already picked the layout and inserted the barrier; validate it here.
        imageLayout = mDepthStencilImage->getCurrentImageLayout();
        if (mDepthStencilImage->hasRenderPassUsageFlag(RenderPassUsage::ReadOnlyAttachment))
        {
            ASSERT(imageLayout == ImageLayout::DSAttachmentReadAndFragmentShaderRead ||
                   imageLayout == ImageLayout::DSAttachmentReadAndAllShadersRead);
            // Read-after-read may not need a barrier; let the image decide.
            barrierRequired = mDepthStencilImage->isReadBarrierNecessary(imageLayout);
        }
        else
        {
            ASSERT(imageLayout == ImageLayout::DSAttachmentWriteAndFragmentShaderRead ||
                   imageLayout == ImageLayout::DSAttachmentWriteAndAllShadersRead);
            barrierRequired = true;
        }
    }
    else if (mDepthStencilImage->hasRenderPassUsageFlag(RenderPassUsage::ReadOnlyAttachment))
    {
        imageLayout     = ImageLayout::DepthStencilAttachmentReadOnly;
        barrierRequired = mDepthStencilImage->isReadBarrierNecessary(imageLayout);
    }
    else
    {
        // Write always requires a barrier
        imageLayout     = ImageLayout::DepthStencilAttachment;
        barrierRequired = true;
    }

    mAttachmentOps.setLayouts(mDepthStencilAttachmentIndex, imageLayout, imageLayout);

    if (barrierRequired)
    {
        const angle::Format &format = mDepthStencilImage->getFormat().actualImageFormat();
        ASSERT(format.hasDepthOrStencilBits());
        VkImageAspectFlags aspectFlags = GetDepthStencilAspectFlags(format);
        updateImageLayoutAndBarrier(context, mDepthStencilImage, aspectFlags, imageLayout);
    }
}
1428 
finalizeDepthStencilResolveImageLayout(Context * context)1429 void CommandBufferHelper::finalizeDepthStencilResolveImageLayout(Context *context)
1430 {
1431     ASSERT(mIsRenderPassCommandBuffer);
1432     ASSERT(mDepthStencilImage);
1433     ASSERT(!mDepthStencilResolveImage->hasRenderPassUsageFlag(RenderPassUsage::ReadOnlyAttachment));
1434 
1435     ImageLayout imageLayout     = ImageLayout::DepthStencilResolveAttachment;
1436     const angle::Format &format = mDepthStencilResolveImage->getFormat().actualImageFormat();
1437     ASSERT(format.hasDepthOrStencilBits());
1438     VkImageAspectFlags aspectFlags = GetDepthStencilAspectFlags(format);
1439 
1440     updateImageLayoutAndBarrier(context, mDepthStencilResolveImage, aspectFlags, imageLayout);
1441 
1442     if (!mDepthStencilResolveImage->hasRenderPassUsageFlag(RenderPassUsage::ReadOnlyAttachment))
1443     {
1444         ASSERT(mDepthStencilAttachmentIndex != kAttachmentIndexInvalid);
1445         const PackedAttachmentOpsDesc &dsOps = mAttachmentOps[mDepthStencilAttachmentIndex];
1446 
1447         // If the image is being written to, mark its contents defined.
1448         VkImageAspectFlags definedAspects = 0;
1449         if (!dsOps.isInvalidated)
1450         {
1451             definedAspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
1452         }
1453         if (!dsOps.isStencilInvalidated)
1454         {
1455             definedAspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
1456         }
1457         if (definedAspects != 0)
1458         {
1459             mDepthStencilResolveImage->onWrite(mDepthStencilLevelIndex, 1, mDepthStencilLayerIndex,
1460                                                mDepthStencilLayerCount, definedAspects);
1461         }
1462     }
1463 
1464     mDepthStencilResolveImage->resetRenderPassUsageFlags();
1465 }
1466 
finalizeImageLayout(Context * context,const ImageHelper * image)1467 void CommandBufferHelper::finalizeImageLayout(Context *context, const ImageHelper *image)
1468 {
1469     ASSERT(mIsRenderPassCommandBuffer);
1470 
1471     if (image->hasRenderPassUsageFlag(RenderPassUsage::RenderTargetAttachment))
1472     {
1473         for (PackedAttachmentIndex index = kAttachmentIndexZero; index < mColorImagesCount; ++index)
1474         {
1475             if (mColorImages[index] == image)
1476             {
1477                 finalizeColorImageLayout(context, mColorImages[index], index, false);
1478                 mColorImages[index] = nullptr;
1479             }
1480             else if (mColorResolveImages[index] == image)
1481             {
1482                 finalizeColorImageLayout(context, mColorResolveImages[index], index, true);
1483                 mColorResolveImages[index] = nullptr;
1484             }
1485         }
1486     }
1487 
1488     if (mDepthStencilImage == image)
1489     {
1490         finalizeDepthStencilImageLayoutAndLoadStore(context);
1491         mDepthStencilImage = nullptr;
1492     }
1493 
1494     if (mDepthStencilResolveImage == image)
1495     {
1496         finalizeDepthStencilResolveImageLayout(context);
1497         mDepthStencilResolveImage = nullptr;
1498     }
1499 }
1500 
// Finalizes the depth/stencil load/store ops once rendering in the pass is done: drops stores for
// invalidated or never-written aspects, restores contents written after an invalidate, applies the
// QCOM store-op-none optimization for read-only passes, drops loads that are never consumed, and
// finally marks stored aspects as defined on the image.  Order of these steps matters.
void CommandBufferHelper::finalizeDepthStencilLoadStore(Context *context)
{
    ASSERT(mDepthStencilAttachmentIndex != kAttachmentIndexInvalid);

    PackedAttachmentOpsDesc &dsOps = mAttachmentOps[mDepthStencilAttachmentIndex];

    // This has to be called after the layout has been finalized.
    ASSERT(dsOps.initialLayout != static_cast<uint16_t>(ImageLayout::Undefined));

    // Ensure we don't write to a read-only RenderPass. (ReadOnly -> !Write)
    ASSERT(!mDepthStencilImage->hasRenderPassUsageFlag(RenderPassUsage::ReadOnlyAttachment) ||
           (mDepthAccess != ResourceAccess::Write && mStencilAccess != ResourceAccess::Write));

    // If the attachment is invalidated, skip the store op.  If we are not loading or clearing the
    // attachment and the attachment has not been used, auto-invalidate it.
    const bool depthNotLoaded = dsOps.loadOp == VK_ATTACHMENT_LOAD_OP_DONT_CARE &&
                                !mRenderPassDesc.hasDepthUnresolveAttachment();
    if (isInvalidated(mDepthCmdSizeInvalidated, mDepthCmdSizeDisabled) ||
        (depthNotLoaded && mDepthAccess != ResourceAccess::Write))
    {
        dsOps.storeOp       = RenderPassStoreOp::DontCare;
        dsOps.isInvalidated = true;
    }
    else if (hasWriteAfterInvalidate(mDepthCmdSizeInvalidated, mDepthCmdSizeDisabled))
    {
        // The depth attachment was invalidated, but is now valid.  Let the image know the contents
        // are now defined so a future render pass would use loadOp=LOAD.
        restoreDepthContent();
    }
    const bool stencilNotLoaded = dsOps.stencilLoadOp == VK_ATTACHMENT_LOAD_OP_DONT_CARE &&
                                  !mRenderPassDesc.hasStencilUnresolveAttachment();
    if (isInvalidated(mStencilCmdSizeInvalidated, mStencilCmdSizeDisabled) ||
        (stencilNotLoaded && mStencilAccess != ResourceAccess::Write))
    {
        dsOps.stencilStoreOp       = RenderPassStoreOp::DontCare;
        dsOps.isStencilInvalidated = true;
    }
    else if (hasWriteAfterInvalidate(mStencilCmdSizeInvalidated, mStencilCmdSizeDisabled))
    {
        // The stencil attachment was invalidated, but is now valid.  Let the image know the
        // contents are now defined so a future render pass would use loadOp=LOAD.
        restoreStencilContent();
    }

    // For read only depth stencil, we can use StoreOpNone if available. DONT_CARE is still
    // preferred, so do this after finish the DONT_CARE handling.
    if (mDepthStencilImage->hasRenderPassUsageFlag(RenderPassUsage::ReadOnlyAttachment) &&
        context->getRenderer()->getFeatures().supportsRenderPassStoreOpNoneQCOM.enabled)
    {
        if (dsOps.storeOp == RenderPassStoreOp::Store)
        {
            dsOps.storeOp = RenderPassStoreOp::NoneQCOM;
        }
        if (dsOps.stencilStoreOp == RenderPassStoreOp::Store)
        {
            dsOps.stencilStoreOp = RenderPassStoreOp::NoneQCOM;
        }
    }

    // If we are loading or clearing the attachment, but the attachment has not been used, and the
    // data has also not been stored back into attachment, then just skip the load/clear op.
    // NOTE(review): this compares the packed storeOp against the VK enum while the stencil branch
    // below uses RenderPassStoreOp::DontCare; presumably the enumerator values match -- verify and
    // make consistent.
    if (mDepthAccess == ResourceAccess::Unused && dsOps.storeOp == VK_ATTACHMENT_STORE_OP_DONT_CARE)
    {
        dsOps.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    }

    if (mStencilAccess == ResourceAccess::Unused &&
        dsOps.stencilStoreOp == RenderPassStoreOp::DontCare)
    {
        dsOps.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    }

    // This has to be done after storeOp has been finalized.
    if (!mDepthStencilImage->hasRenderPassUsageFlag(RenderPassUsage::ReadOnlyAttachment))
    {
        // If the image is being written to, mark its contents defined.
        VkImageAspectFlags definedAspects = 0;
        if (dsOps.storeOp == VK_ATTACHMENT_STORE_OP_STORE)
        {
            definedAspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
        }
        if (dsOps.stencilStoreOp == VK_ATTACHMENT_STORE_OP_STORE)
        {
            definedAspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
        }
        if (definedAspects != 0)
        {
            mDepthStencilImage->onWrite(mDepthStencilLevelIndex, 1, mDepthStencilLayerIndex,
                                        mDepthStencilLayerCount, definedAspects);
        }
    }
}
1593 
// Finalizes the depth/stencil attachment in the required order: layout first (load/store
// finalization asserts the layout is no longer Undefined), then load/store ops, then reset the
// per-render-pass usage flags for the next pass.
void CommandBufferHelper::finalizeDepthStencilImageLayoutAndLoadStore(Context *context)
{
    finalizeDepthStencilImageLayout(context);
    finalizeDepthStencilLoadStore(context);
    mDepthStencilImage->resetRenderPassUsageFlags();
}
1600 
beginRenderPass(const Framebuffer & framebuffer,const gl::Rectangle & renderArea,const RenderPassDesc & renderPassDesc,const AttachmentOpsArray & renderPassAttachmentOps,const vk::PackedAttachmentCount colorAttachmentCount,const PackedAttachmentIndex depthStencilAttachmentIndex,const PackedClearValuesArray & clearValues,CommandBuffer ** commandBufferOut)1601 void CommandBufferHelper::beginRenderPass(const Framebuffer &framebuffer,
1602                                           const gl::Rectangle &renderArea,
1603                                           const RenderPassDesc &renderPassDesc,
1604                                           const AttachmentOpsArray &renderPassAttachmentOps,
1605                                           const vk::PackedAttachmentCount colorAttachmentCount,
1606                                           const PackedAttachmentIndex depthStencilAttachmentIndex,
1607                                           const PackedClearValuesArray &clearValues,
1608                                           CommandBuffer **commandBufferOut)
1609 {
1610     ASSERT(mIsRenderPassCommandBuffer);
1611     ASSERT(empty());
1612 
1613     mRenderPassDesc              = renderPassDesc;
1614     mAttachmentOps               = renderPassAttachmentOps;
1615     mDepthStencilAttachmentIndex = depthStencilAttachmentIndex;
1616     mColorImagesCount            = colorAttachmentCount;
1617     mFramebuffer.setHandle(framebuffer.getHandle());
1618     mRenderArea       = renderArea;
1619     mClearValues      = clearValues;
1620     *commandBufferOut = &mCommandBuffer;
1621 
1622     mRenderPassStarted = true;
1623     mCounter++;
1624 }
1625 
endRenderPass(ContextVk * contextVk)1626 void CommandBufferHelper::endRenderPass(ContextVk *contextVk)
1627 {
1628     for (PackedAttachmentIndex index = kAttachmentIndexZero; index < mColorImagesCount; ++index)
1629     {
1630         if (mColorImages[index])
1631         {
1632             finalizeColorImageLayout(contextVk, mColorImages[index], index, false);
1633         }
1634         if (mColorResolveImages[index])
1635         {
1636             finalizeColorImageLayout(contextVk, mColorResolveImages[index], index, true);
1637         }
1638     }
1639 
1640     if (mDepthStencilAttachmentIndex == kAttachmentIndexInvalid)
1641     {
1642         return;
1643     }
1644 
1645     // Do depth stencil layout change and load store optimization.
1646     if (mDepthStencilImage)
1647     {
1648         finalizeDepthStencilImageLayoutAndLoadStore(contextVk);
1649     }
1650     if (mDepthStencilResolveImage)
1651     {
1652         finalizeDepthStencilResolveImageLayout(contextVk);
1653     }
1654 }
1655 
beginTransformFeedback(size_t validBufferCount,const VkBuffer * counterBuffers,bool rebindBuffers)1656 void CommandBufferHelper::beginTransformFeedback(size_t validBufferCount,
1657                                                  const VkBuffer *counterBuffers,
1658                                                  bool rebindBuffers)
1659 {
1660     ASSERT(mIsRenderPassCommandBuffer);
1661     mValidTransformFeedbackBufferCount = static_cast<uint32_t>(validBufferCount);
1662     mRebindTransformFeedbackBuffers    = rebindBuffers;
1663 
1664     for (size_t index = 0; index < validBufferCount; index++)
1665     {
1666         mTransformFeedbackCounterBuffers[index] = counterBuffers[index];
1667     }
1668 }
1669 
// Ends transform feedback for this render pass: pause it in the command buffer first, then reset
// the tracked buffer count so isTransformFeedbackStarted() becomes false.
void CommandBufferHelper::endTransformFeedback()
{
    ASSERT(mIsRenderPassCommandBuffer);
    pauseTransformFeedback();
    mValidTransformFeedbackBufferCount = 0;
}
1676 
// Marks a color attachment's contents invalidated: the store op is dropped to DontCare and the
// attachment is flagged so later passes know its contents are undefined.
void CommandBufferHelper::invalidateRenderPassColorAttachment(PackedAttachmentIndex attachmentIndex)
{
    ASSERT(mIsRenderPassCommandBuffer);
    SetBitField(mAttachmentOps[attachmentIndex].storeOp, RenderPassStoreOp::DontCare);
    mAttachmentOps[attachmentIndex].isInvalidated = true;
}
1683 
invalidateRenderPassDepthAttachment(const gl::DepthStencilState & dsState,const gl::Rectangle & invalidateArea)1684 void CommandBufferHelper::invalidateRenderPassDepthAttachment(const gl::DepthStencilState &dsState,
1685                                                               const gl::Rectangle &invalidateArea)
1686 {
1687     ASSERT(mIsRenderPassCommandBuffer);
1688     // Keep track of the size of commands in the command buffer.  If the size grows in the
1689     // future, that implies that drawing occured since invalidated.
1690     mDepthCmdSizeInvalidated = mCommandBuffer.getCommandSize();
1691 
1692     // Also track the size if the attachment is currently disabled.
1693     const bool isDepthWriteEnabled = dsState.depthTest && dsState.depthMask;
1694     mDepthCmdSizeDisabled = isDepthWriteEnabled ? kInfiniteCmdSize : mDepthCmdSizeInvalidated;
1695 
1696     // Set/extend the invalidate area.
1697     ExtendRenderPassInvalidateArea(invalidateArea, &mDepthInvalidateArea);
1698 }
1699 
invalidateRenderPassStencilAttachment(const gl::DepthStencilState & dsState,const gl::Rectangle & invalidateArea)1700 void CommandBufferHelper::invalidateRenderPassStencilAttachment(
1701     const gl::DepthStencilState &dsState,
1702     const gl::Rectangle &invalidateArea)
1703 {
1704     ASSERT(mIsRenderPassCommandBuffer);
1705     // Keep track of the size of commands in the command buffer.  If the size grows in the
1706     // future, that implies that drawing occured since invalidated.
1707     mStencilCmdSizeInvalidated = mCommandBuffer.getCommandSize();
1708 
1709     // Also track the size if the attachment is currently disabled.
1710     const bool isStencilWriteEnabled =
1711         dsState.stencilTest && (!dsState.isStencilNoOp() || !dsState.isStencilBackNoOp());
1712     mStencilCmdSizeDisabled = isStencilWriteEnabled ? kInfiniteCmdSize : mStencilCmdSizeInvalidated;
1713 
1714     // Set/extend the invalidate area.
1715     ExtendRenderPassInvalidateArea(invalidateArea, &mStencilInvalidateArea);
1716 }
1717 
// Flushes this helper's recorded commands into the primary command buffer: pending barriers
// first, then (for render pass buffers) the render pass begin/commands/end.  The helper is reset
// afterwards for reuse.
angle::Result CommandBufferHelper::flushToPrimary(const angle::FeaturesVk &features,
                                                  PrimaryCommandBuffer *primary,
                                                  const RenderPass *renderPass)
{
    ANGLE_TRACE_EVENT0("gpu.angle", "CommandBufferHelper::flushToPrimary");
    ASSERT(!empty());

    // Commands that are added to primary before beginRenderPass command
    executeBarriers(features, primary);

    if (mIsRenderPassCommandBuffer)
    {
        ASSERT(renderPass != nullptr);

        VkRenderPassBeginInfo beginInfo    = {};
        beginInfo.sType                    = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
        beginInfo.renderPass               = renderPass->getHandle();
        beginInfo.framebuffer              = mFramebuffer.getHandle();
        // NOTE(review): VkOffset2D members are signed int32_t; these uint32_t casts assume a
        // non-negative render area origin -- confirm.
        beginInfo.renderArea.offset.x      = static_cast<uint32_t>(mRenderArea.x);
        beginInfo.renderArea.offset.y      = static_cast<uint32_t>(mRenderArea.y);
        beginInfo.renderArea.extent.width  = static_cast<uint32_t>(mRenderArea.width);
        beginInfo.renderArea.extent.height = static_cast<uint32_t>(mRenderArea.height);
        beginInfo.clearValueCount = static_cast<uint32_t>(mRenderPassDesc.attachmentCount());
        beginInfo.pClearValues    = mClearValues.data();

        // Run commands inside the RenderPass.
        primary->beginRenderPass(beginInfo, VK_SUBPASS_CONTENTS_INLINE);
        mCommandBuffer.executeCommands(primary->getHandle());
        primary->endRenderPass();
    }
    else
    {
        mCommandBuffer.executeCommands(primary->getHandle());
    }

    // Restart the command buffer.
    reset();

    return angle::Result::Continue;
}
1758 
// Retargets the in-progress render pass at a new framebuffer and pass description (presumably
// when a resolve attachment is added to an already-started pass -- callers confirm).
// NOTE(review): contextVk is currently unused; kept for interface stability.
void CommandBufferHelper::updateRenderPassForResolve(ContextVk *contextVk,
                                                     Framebuffer *newFramebuffer,
                                                     const RenderPassDesc &renderPassDesc)
{
    ASSERT(newFramebuffer);
    mFramebuffer.setHandle(newFramebuffer->getHandle());
    mRenderPassDesc = renderPassDesc;
}
1767 
1768 // Helper functions used below
GetLoadOpShorthand(uint32_t loadOp)1769 char GetLoadOpShorthand(uint32_t loadOp)
1770 {
1771     switch (loadOp)
1772     {
1773         case VK_ATTACHMENT_LOAD_OP_CLEAR:
1774             return 'C';
1775         case VK_ATTACHMENT_LOAD_OP_LOAD:
1776             return 'L';
1777         default:
1778             return 'D';
1779     }
1780 }
1781 
GetStoreOpShorthand(RenderPassStoreOp storeOp)1782 char GetStoreOpShorthand(RenderPassStoreOp storeOp)
1783 {
1784     switch (storeOp)
1785     {
1786         case RenderPassStoreOp::Store:
1787             return 'S';
1788         case RenderPassStoreOp::NoneQCOM:
1789             return 'N';
1790         default:
1791             return 'D';
1792     }
1793 }
1794 
// Builds a human-readable diagnostics string for this command buffer -- pending barriers,
// per-attachment load/store op mnemonics (render pass buffers only), and a dump of the recorded
// commands -- and hands it to the context.  "\\l" is the line separator used by the dump format.
void CommandBufferHelper::addCommandDiagnostics(ContextVk *contextVk)
{
    std::ostringstream out;

    out << "Memory Barrier: ";
    for (PipelineBarrier &barrier : mPipelineBarriers)
    {
        if (!barrier.isEmpty())
        {
            barrier.addDiagnosticsString(out);
        }
    }
    out << "\\l";

    if (mIsRenderPassCommandBuffer)
    {
        // Color attachments are packed first; depth/stencil (if any) is the last attachment.
        size_t attachmentCount             = mRenderPassDesc.attachmentCount();
        size_t depthStencilAttachmentCount = mRenderPassDesc.hasDepthStencilAttachment() ? 1 : 0;
        size_t colorAttachmentCount        = attachmentCount - depthStencilAttachmentCount;

        PackedAttachmentIndex attachmentIndexVk(0);
        std::string loadOps, storeOps;

        if (colorAttachmentCount > 0)
        {
            loadOps += " Color: ";
            storeOps += " Color: ";

            for (size_t i = 0; i < colorAttachmentCount; ++i)
            {
                loadOps += GetLoadOpShorthand(mAttachmentOps[attachmentIndexVk].loadOp);
                storeOps += GetStoreOpShorthand(
                    static_cast<RenderPassStoreOp>(mAttachmentOps[attachmentIndexVk].storeOp));
                ++attachmentIndexVk;
            }
        }

        if (depthStencilAttachmentCount > 0)
        {
            ASSERT(depthStencilAttachmentCount == 1);

            loadOps += " Depth/Stencil: ";
            storeOps += " Depth/Stencil: ";

            // Two characters each: depth op first, then stencil op.
            loadOps += GetLoadOpShorthand(mAttachmentOps[attachmentIndexVk].loadOp);
            loadOps += GetLoadOpShorthand(mAttachmentOps[attachmentIndexVk].stencilLoadOp);

            storeOps += GetStoreOpShorthand(
                static_cast<RenderPassStoreOp>(mAttachmentOps[attachmentIndexVk].storeOp));
            storeOps += GetStoreOpShorthand(
                static_cast<RenderPassStoreOp>(mAttachmentOps[attachmentIndexVk].stencilStoreOp));
        }

        if (attachmentCount > 0)
        {
            out << "LoadOp:  " << loadOps << "\\l";
            out << "StoreOp: " << storeOps << "\\l";
        }
    }
    out << mCommandBuffer.dumpCommands("\\l");
    contextVk->addCommandBufferDiagnostics(out.str());
}
1857 
resumeTransformFeedback()1858 void CommandBufferHelper::resumeTransformFeedback()
1859 {
1860     ASSERT(mIsRenderPassCommandBuffer);
1861     ASSERT(isTransformFeedbackStarted());
1862 
1863     uint32_t numCounterBuffers =
1864         mRebindTransformFeedbackBuffers ? 0 : mValidTransformFeedbackBufferCount;
1865 
1866     mRebindTransformFeedbackBuffers    = false;
1867     mIsTransformFeedbackActiveUnpaused = true;
1868 
1869     mCommandBuffer.beginTransformFeedback(0, numCounterBuffers,
1870                                           mTransformFeedbackCounterBuffers.data(), nullptr);
1871 }
1872 
// Pauses active transform feedback by recording an end-transform-feedback command with the
// tracked counter buffers; it can later be resumed with resumeTransformFeedback().
void CommandBufferHelper::pauseTransformFeedback()
{
    ASSERT(mIsRenderPassCommandBuffer);
    ASSERT(isTransformFeedbackStarted() && isTransformFeedbackActiveUnpaused());
    mIsTransformFeedbackActiveUnpaused = false;
    mCommandBuffer.endTransformFeedback(0, mValidTransformFeedbackBufferCount,
                                        mTransformFeedbackCounterBuffers.data(), nullptr);
}
1881 
// Promotes a color attachment's load op to a clear and records the clear value for the pass.
void CommandBufferHelper::updateRenderPassColorClear(PackedAttachmentIndex colorIndexVk,
                                                     const VkClearValue &clearValue)
{
    mAttachmentOps.setClearOp(colorIndexVk);
    mClearValues.store(colorIndexVk, VK_IMAGE_ASPECT_COLOR_BIT, clearValue);
}
1888 
updateRenderPassDepthStencilClear(VkImageAspectFlags aspectFlags,const VkClearValue & clearValue)1889 void CommandBufferHelper::updateRenderPassDepthStencilClear(VkImageAspectFlags aspectFlags,
1890                                                             const VkClearValue &clearValue)
1891 {
1892     // Don't overwrite prior clear values for individual aspects.
1893     VkClearValue combinedClearValue = mClearValues[mDepthStencilAttachmentIndex];
1894 
1895     if ((aspectFlags & VK_IMAGE_ASPECT_DEPTH_BIT) != 0)
1896     {
1897         mAttachmentOps.setClearOp(mDepthStencilAttachmentIndex);
1898         combinedClearValue.depthStencil.depth = clearValue.depthStencil.depth;
1899     }
1900 
1901     if ((aspectFlags & VK_IMAGE_ASPECT_STENCIL_BIT) != 0)
1902     {
1903         mAttachmentOps.setClearStencilOp(mDepthStencilAttachmentIndex);
1904         combinedClearValue.depthStencil.stencil = clearValue.depthStencil.stencil;
1905     }
1906 
1907     // Bypass special D/S handling. This clear values array stores values packed.
1908     mClearValues.storeNoDepthStencil(mDepthStencilAttachmentIndex, combinedClearValue);
1909 }
1910 
// Grows the render area to enclose |newRenderArea|.  Any recorded depth/stencil invalidate whose
// area no longer covers the grown render area is discarded (with a perf warning), since the
// invalidate would otherwise incorrectly apply to pixels outside the invalidated region.
void CommandBufferHelper::growRenderArea(ContextVk *contextVk, const gl::Rectangle &newRenderArea)
{
    ASSERT(mIsRenderPassCommandBuffer);

    // The render area is grown such that it covers both the previous and the new render areas.
    gl::GetEnclosingRectangle(mRenderArea, newRenderArea, &mRenderArea);

    // Remove invalidates that are no longer applicable.
    if (!mDepthInvalidateArea.empty() && !mDepthInvalidateArea.encloses(mRenderArea))
    {
        ANGLE_PERF_WARNING(
            contextVk->getDebug(), GL_DEBUG_SEVERITY_LOW,
            "InvalidateSubFramebuffer for depth discarded due to increased scissor region");
        mDepthInvalidateArea     = gl::Rectangle();
        mDepthCmdSizeInvalidated = kInfiniteCmdSize;
    }
    if (!mStencilInvalidateArea.empty() && !mStencilInvalidateArea.encloses(mRenderArea))
    {
        ANGLE_PERF_WARNING(
            contextVk->getDebug(), GL_DEBUG_SEVERITY_LOW,
            "InvalidateSubFramebuffer for stencil discarded due to increased scissor region");
        mStencilInvalidateArea     = gl::Rectangle();
        mStencilCmdSizeInvalidated = kInfiniteCmdSize;
    }
}
1936 
1937 // DynamicBuffer implementation.
// DynamicBuffer implementation.
// Default constructor: all scalar state zeroed; a usable buffer is created later via
// init()/initWithFlags() followed by allocation.
DynamicBuffer::DynamicBuffer()
    : mUsage(0),
      mHostVisible(false),
      mPolicy(DynamicBufferPolicy::OneShotUse),
      mInitialSize(0),
      mNextAllocationOffset(0),
      mLastFlushOrInvalidateOffset(0),
      mSize(0),
      mAlignment(0),
      mMemoryPropertyFlags(0)
{}
1949 
// Move constructor.  The buffer and buffer lists are moved; scalar members are copied.
// NOTE(review): the moved-from object's scalar members (offsets, sizes) are not reset here --
// only its mBuffer/mInFlightBuffers/mBufferFreeList are emptied by their own moves.
DynamicBuffer::DynamicBuffer(DynamicBuffer &&other)
    : mUsage(other.mUsage),
      mHostVisible(other.mHostVisible),
      mPolicy(other.mPolicy),
      mInitialSize(other.mInitialSize),
      mBuffer(std::move(other.mBuffer)),
      mNextAllocationOffset(other.mNextAllocationOffset),
      mLastFlushOrInvalidateOffset(other.mLastFlushOrInvalidateOffset),
      mSize(other.mSize),
      mAlignment(other.mAlignment),
      mMemoryPropertyFlags(other.mMemoryPropertyFlags),
      mInFlightBuffers(std::move(other.mInFlightBuffers)),
      mBufferFreeList(std::move(other.mBufferFreeList))
{}
1964 
init(RendererVk * renderer,VkBufferUsageFlags usage,size_t alignment,size_t initialSize,bool hostVisible,DynamicBufferPolicy policy)1965 void DynamicBuffer::init(RendererVk *renderer,
1966                          VkBufferUsageFlags usage,
1967                          size_t alignment,
1968                          size_t initialSize,
1969                          bool hostVisible,
1970                          DynamicBufferPolicy policy)
1971 {
1972     VkMemoryPropertyFlags memoryPropertyFlags =
1973         (hostVisible) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
1974 
1975     initWithFlags(renderer, usage, alignment, initialSize, memoryPropertyFlags, policy);
1976 }
1977 
// Full initializer: records usage/memory flags and policy, sets the initial size (unless a test
// override is in place), applies the mock-ICD size workaround, and establishes the alignment.
void DynamicBuffer::initWithFlags(RendererVk *renderer,
                                  VkBufferUsageFlags usage,
                                  size_t alignment,
                                  size_t initialSize,
                                  VkMemoryPropertyFlags memoryPropertyFlags,
                                  DynamicBufferPolicy policy)
{
    mUsage               = usage;
    mHostVisible         = ((memoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0);
    mMemoryPropertyFlags = memoryPropertyFlags;
    mPolicy              = policy;

    // Check that we haven't overridden the initial size of the buffer in setMinimumSizeForTesting.
    if (mInitialSize == 0)
    {
        mInitialSize = initialSize;
        mSize        = 0;
    }

    // Workaround for the mock ICD not supporting allocations greater than 0x1000.
    // Could be removed if https://github.com/KhronosGroup/Vulkan-Tools/issues/84 is fixed.
    if (renderer->isMockICDEnabled())
    {
        mSize = std::min<size_t>(mSize, 0x1000);
    }

    requireAlignment(renderer, alignment);
}
2006 
// Destructor.  The owner must have released all buffers before destruction: the active buffer as
// well as the in-flight and free lists must already be empty.
DynamicBuffer::~DynamicBuffer()
{
    ASSERT(mBuffer == nullptr);
    ASSERT(mInFlightBuffers.empty());
    ASSERT(mBufferFreeList.empty());
}
2013 
// Allocates a fresh backing buffer of mSize bytes with the configured usage and memory property
// flags.  Also bumps the overlay's dynamic-buffer-allocation graph when the overlay is enabled.
angle::Result DynamicBuffer::allocateNewBuffer(ContextVk *contextVk)
{
    // Gather statistics
    const gl::OverlayType *overlay = contextVk->getOverlay();
    if (overlay->isEnabled())
    {
        gl::RunningGraphWidget *dynamicBufferAllocations =
            overlay->getRunningGraphWidget(gl::WidgetId::VulkanDynamicBufferAllocations);
        dynamicBufferAllocations->add(1);
    }

    // Allocate the buffer
    ASSERT(!mBuffer);
    mBuffer = std::make_unique<BufferHelper>();

    VkBufferCreateInfo createInfo    = {};
    createInfo.sType                 = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    createInfo.flags                 = 0;
    createInfo.size                  = mSize;
    createInfo.usage                 = mUsage;
    createInfo.sharingMode           = VK_SHARING_MODE_EXCLUSIVE;
    createInfo.queueFamilyIndexCount = 0;
    createInfo.pQueueFamilyIndices   = nullptr;

    return mBuffer->init(contextVk, createInfo, mMemoryPropertyFlags);
}
2040 
allocateFromCurrentBuffer(size_t sizeInBytes,uint8_t ** ptrOut,VkDeviceSize * offsetOut)2041 bool DynamicBuffer::allocateFromCurrentBuffer(size_t sizeInBytes,
2042                                               uint8_t **ptrOut,
2043                                               VkDeviceSize *offsetOut)
2044 {
2045     ASSERT(ptrOut);
2046     ASSERT(offsetOut);
2047     size_t sizeToAllocate                                      = roundUp(sizeInBytes, mAlignment);
2048     angle::base::CheckedNumeric<size_t> checkedNextWriteOffset = mNextAllocationOffset;
2049     checkedNextWriteOffset += sizeToAllocate;
2050 
2051     if (!checkedNextWriteOffset.IsValid() || checkedNextWriteOffset.ValueOrDie() >= mSize)
2052     {
2053         return false;
2054     }
2055 
2056     ASSERT(mBuffer != nullptr);
2057     ASSERT(mHostVisible);
2058     ASSERT(mBuffer->getMappedMemory());
2059 
2060     *ptrOut    = mBuffer->getMappedMemory() + mNextAllocationOffset;
2061     *offsetOut = static_cast<VkDeviceSize>(mNextAllocationOffset);
2062 
2063     mNextAllocationOffset += static_cast<uint32_t>(sizeToAllocate);
2064     return true;
2065 }
2066 
// Suballocates |sizeInBytes| (rounded up to mAlignment) starting at an offset aligned to
// |alignment|.  If the request does not fit in the current buffer, the buffer is flushed and
// retired to the in-flight list, and a replacement is taken from the free list (if its oldest
// entry is idle) or freshly allocated.  All out parameters are optional; using ptrOut
// requires the buffer to be host visible.
angle::Result DynamicBuffer::allocateWithAlignment(ContextVk *contextVk,
                                                   size_t sizeInBytes,
                                                   size_t alignment,
                                                   uint8_t **ptrOut,
                                                   VkBuffer *bufferOut,
                                                   VkDeviceSize *offsetOut,
                                                   bool *newBufferAllocatedOut)
{
    // Align the write cursor for this request; the size itself is padded to mAlignment.
    mNextAllocationOffset =
        roundUp<uint32_t>(mNextAllocationOffset, static_cast<uint32_t>(alignment));
    size_t sizeToAllocate = roundUp(sizeInBytes, mAlignment);

    // Overflow-checked end offset of the prospective allocation.
    angle::base::CheckedNumeric<size_t> checkedNextWriteOffset = mNextAllocationOffset;
    checkedNextWriteOffset += sizeToAllocate;

    if (!checkedNextWriteOffset.IsValid() || checkedNextWriteOffset.ValueOrDie() >= mSize)
    {
        if (mBuffer)
        {
            // Make sure the buffer is not released externally.
            ASSERT(mBuffer->valid());

            // Flush pending writes before the buffer is handed off to in-flight tracking.
            ANGLE_TRY(flush(contextVk));

            mInFlightBuffers.push_back(std::move(mBuffer));
            ASSERT(!mBuffer);
        }

        // Grow if the request doesn't fit; shrink back if recent requests need less than a
        // quarter of the current size.
        const size_t sizeIgnoringHistory = std::max(mInitialSize, sizeToAllocate);
        if (sizeToAllocate > mSize || sizeIgnoringHistory < mSize / 4)
        {
            mSize = sizeIgnoringHistory;

            // Clear the free list since the free buffers are now either too small or too big.
            ReleaseBufferListToRenderer(contextVk->getRenderer(), &mBufferFreeList);
        }

        // The front of the free list should be the oldest. Thus if it is in use the rest of the
        // free list should be in use as well.
        if (mBufferFreeList.empty() ||
            mBufferFreeList.front()->isCurrentlyInUse(contextVk->getLastCompletedQueueSerial()))
        {
            ANGLE_TRY(allocateNewBuffer(contextVk));
        }
        else
        {
            mBuffer = std::move(mBufferFreeList.front());
            mBufferFreeList.erase(mBufferFreeList.begin());
        }

        ASSERT(mBuffer->getSize() == mSize);

        // Fresh buffer: restart allocation and flush tracking from offset 0.
        mNextAllocationOffset        = 0;
        mLastFlushOrInvalidateOffset = 0;

        if (newBufferAllocatedOut != nullptr)
        {
            *newBufferAllocatedOut = true;
        }
    }
    else if (newBufferAllocatedOut != nullptr)
    {
        *newBufferAllocatedOut = false;
    }

    ASSERT(mBuffer != nullptr);

    if (bufferOut != nullptr)
    {
        *bufferOut = mBuffer->getBuffer().getHandle();
    }

    // Optionally map() the buffer if possible
    if (ptrOut)
    {
        ASSERT(mHostVisible);
        uint8_t *mappedMemory;
        ANGLE_TRY(mBuffer->map(contextVk, &mappedMemory));
        *ptrOut = mappedMemory + mNextAllocationOffset;
    }

    if (offsetOut != nullptr)
    {
        *offsetOut = static_cast<VkDeviceSize>(mNextAllocationOffset);
    }

    mNextAllocationOffset += static_cast<uint32_t>(sizeToAllocate);
    return angle::Result::Continue;
}
2156 
flush(ContextVk * contextVk)2157 angle::Result DynamicBuffer::flush(ContextVk *contextVk)
2158 {
2159     if (mHostVisible && (mNextAllocationOffset > mLastFlushOrInvalidateOffset))
2160     {
2161         ASSERT(mBuffer != nullptr);
2162         ANGLE_TRY(mBuffer->flush(contextVk->getRenderer(), mLastFlushOrInvalidateOffset,
2163                                  mNextAllocationOffset - mLastFlushOrInvalidateOffset));
2164         mLastFlushOrInvalidateOffset = mNextAllocationOffset;
2165     }
2166     return angle::Result::Continue;
2167 }
2168 
invalidate(ContextVk * contextVk)2169 angle::Result DynamicBuffer::invalidate(ContextVk *contextVk)
2170 {
2171     if (mHostVisible && (mNextAllocationOffset > mLastFlushOrInvalidateOffset))
2172     {
2173         ASSERT(mBuffer != nullptr);
2174         ANGLE_TRY(mBuffer->invalidate(contextVk->getRenderer(), mLastFlushOrInvalidateOffset,
2175                                       mNextAllocationOffset - mLastFlushOrInvalidateOffset));
2176         mLastFlushOrInvalidateOffset = mNextAllocationOffset;
2177     }
2178     return angle::Result::Continue;
2179 }
2180 
release(RendererVk * renderer)2181 void DynamicBuffer::release(RendererVk *renderer)
2182 {
2183     reset();
2184 
2185     ReleaseBufferListToRenderer(renderer, &mInFlightBuffers);
2186     ReleaseBufferListToRenderer(renderer, &mBufferFreeList);
2187 
2188     if (mBuffer)
2189     {
2190         mBuffer->release(renderer);
2191         mBuffer.reset(nullptr);
2192     }
2193 }
2194 
releaseInFlightBuffersToResourceUseList(ContextVk * contextVk)2195 void DynamicBuffer::releaseInFlightBuffersToResourceUseList(ContextVk *contextVk)
2196 {
2197     ResourceUseList *resourceUseList = &contextVk->getResourceUseList();
2198     for (std::unique_ptr<BufferHelper> &bufferHelper : mInFlightBuffers)
2199     {
2200         bufferHelper->retain(resourceUseList);
2201 
2202         if (ShouldReleaseFreeBuffer(*bufferHelper, mSize, mPolicy, mBufferFreeList.size()))
2203         {
2204             bufferHelper->release(contextVk->getRenderer());
2205         }
2206         else
2207         {
2208             bufferHelper->unmap(contextVk->getRenderer());
2209             mBufferFreeList.push_back(std::move(bufferHelper));
2210         }
2211     }
2212     mInFlightBuffers.clear();
2213 }
2214 
releaseInFlightBuffers(ContextVk * contextVk)2215 void DynamicBuffer::releaseInFlightBuffers(ContextVk *contextVk)
2216 {
2217     for (std::unique_ptr<BufferHelper> &toRelease : mInFlightBuffers)
2218     {
2219         if (ShouldReleaseFreeBuffer(*toRelease, mSize, mPolicy, mBufferFreeList.size()))
2220         {
2221             toRelease->release(contextVk->getRenderer());
2222         }
2223         else
2224         {
2225             toRelease->unmap(contextVk->getRenderer());
2226             mBufferFreeList.push_back(std::move(toRelease));
2227         }
2228     }
2229 
2230     mInFlightBuffers.clear();
2231 }
2232 
destroy(RendererVk * renderer)2233 void DynamicBuffer::destroy(RendererVk *renderer)
2234 {
2235     reset();
2236 
2237     DestroyBufferList(renderer, &mInFlightBuffers);
2238     DestroyBufferList(renderer, &mBufferFreeList);
2239 
2240     if (mBuffer)
2241     {
2242         mBuffer->unmap(renderer);
2243         mBuffer->destroy(renderer);
2244         mBuffer.reset(nullptr);
2245     }
2246 }
2247 
// Raises the buffer's allocation alignment to the least common multiple of the current
// alignment and |alignment|.  The very first call also folds in the device's
// nonCoherentAtomSize limit so flush/invalidate ranges stay valid.
void DynamicBuffer::requireAlignment(RendererVk *renderer, size_t alignment)
{
    ASSERT(alignment > 0);

    size_t prevAlignment = mAlignment;

    // If alignment was never set, initialize it with the atom size limit.
    if (prevAlignment == 0)
    {
        prevAlignment =
            static_cast<size_t>(renderer->getPhysicalDeviceProperties().limits.nonCoherentAtomSize);
        ASSERT(gl::isPow2(prevAlignment));
    }

    // We need lcm(prevAlignment, alignment).  Usually, one divides the other so std::max() could be
    // used instead.  Only known case where this assumption breaks is for 3-component types with
    // 16- or 32-bit channels, so that's special-cased to avoid a full-fledged lcm implementation.

    if (gl::isPow2(prevAlignment * alignment))
    {
        // Both are powers of two, so one divides the other; the max is the lcm.
        ASSERT(alignment % prevAlignment == 0 || prevAlignment % alignment == 0);

        alignment = std::max(prevAlignment, alignment);
    }
    else
    {
        // At most one of the two carries a factor of 3 (power-of-two times 3); strip the 3,
        // take the max of the power-of-two parts, and multiply the 3 back in.
        ASSERT(prevAlignment % 3 != 0 || gl::isPow2(prevAlignment / 3));
        ASSERT(alignment % 3 != 0 || gl::isPow2(alignment / 3));

        prevAlignment = prevAlignment % 3 == 0 ? prevAlignment / 3 : prevAlignment;
        alignment     = alignment % 3 == 0 ? alignment / 3 : alignment;

        alignment = std::max(prevAlignment, alignment) * 3;
    }

    // If alignment has changed, make sure the next allocation is done at an aligned offset.
    if (alignment != mAlignment)
    {
        mNextAllocationOffset = roundUp(mNextAllocationOffset, static_cast<uint32_t>(alignment));
    }

    mAlignment = alignment;
}
2291 
// Test-only hook: overrides the size used for the next backing-buffer allocation.
void DynamicBuffer::setMinimumSizeForTesting(size_t minSize)
{
    // This will really only have an effect next time we call allocate.
    mInitialSize = minSize;

    // Forces a new allocation on the next allocate.
    mSize = 0;
}
2300 
reset()2301 void DynamicBuffer::reset()
2302 {
2303     mSize                        = 0;
2304     mNextAllocationOffset        = 0;
2305     mLastFlushOrInvalidateOffset = 0;
2306 }
2307 
2308 // DynamicShadowBuffer implementation.
DynamicShadowBuffer()2309 DynamicShadowBuffer::DynamicShadowBuffer() : mInitialSize(0), mSize(0) {}
2310 
// Move constructor: takes over the other buffer's storage and size bookkeeping.
DynamicShadowBuffer::DynamicShadowBuffer(DynamicShadowBuffer &&other)
    : mInitialSize(other.mInitialSize), mSize(other.mSize), mBuffer(std::move(other.mBuffer))
{}
2314 
// Records the minimum size used by subsequent allocate() calls; no storage is created here.
void DynamicShadowBuffer::init(size_t initialSize)
{
    mInitialSize = initialSize;
}
2319 
// The owner must have called release()/destroy() before destruction.
DynamicShadowBuffer::~DynamicShadowBuffer()
{
    ASSERT(mBuffer.empty());
}
2324 
allocate(size_t sizeInBytes)2325 angle::Result DynamicShadowBuffer::allocate(size_t sizeInBytes)
2326 {
2327     bool result = true;
2328 
2329     // Delete the current buffer, if any
2330     if (!mBuffer.empty())
2331     {
2332         result &= mBuffer.resize(0);
2333     }
2334 
2335     // Cache the new size
2336     mSize = std::max(mInitialSize, sizeInBytes);
2337 
2338     // Allocate the buffer
2339     result &= mBuffer.resize(mSize);
2340 
2341     // If allocation failed, release the buffer and return error.
2342     if (!result)
2343     {
2344         release();
2345         return angle::Result::Stop;
2346     }
2347 
2348     return angle::Result::Continue;
2349 }
2350 
release()2351 void DynamicShadowBuffer::release()
2352 {
2353     reset();
2354 
2355     if (!mBuffer.empty())
2356     {
2357         (void)mBuffer.resize(0);
2358     }
2359 }
2360 
// No Vulkan objects are owned, so destroy() is just release(); |device| is unused.
void DynamicShadowBuffer::destroy(VkDevice device)
{
    release();
}
2365 
// Forgets the cached size; the storage itself is handled by release()/allocate().
void DynamicShadowBuffer::reset()
{
    mSize = 0;
}
2370 
2371 // DescriptorPoolHelper implementation.
DescriptorPoolHelper()2372 DescriptorPoolHelper::DescriptorPoolHelper() : mFreeDescriptorSets(0) {}
2373 
2374 DescriptorPoolHelper::~DescriptorPoolHelper() = default;
2375 
// Returns whether |descriptorSetCount| more sets can be allocated from this pool.
bool DescriptorPoolHelper::hasCapacity(uint32_t descriptorSetCount) const
{
    return mFreeDescriptorSets >= descriptorSetCount;
}
2380 
init(ContextVk * contextVk,const std::vector<VkDescriptorPoolSize> & poolSizesIn,uint32_t maxSets)2381 angle::Result DescriptorPoolHelper::init(ContextVk *contextVk,
2382                                          const std::vector<VkDescriptorPoolSize> &poolSizesIn,
2383                                          uint32_t maxSets)
2384 {
2385     if (mDescriptorPool.valid())
2386     {
2387         ASSERT(!isCurrentlyInUse(contextVk->getLastCompletedQueueSerial()));
2388         mDescriptorPool.destroy(contextVk->getDevice());
2389     }
2390 
2391     // Make a copy of the pool sizes, so we can grow them to satisfy the specified maxSets.
2392     std::vector<VkDescriptorPoolSize> poolSizes = poolSizesIn;
2393 
2394     for (VkDescriptorPoolSize &poolSize : poolSizes)
2395     {
2396         poolSize.descriptorCount *= maxSets;
2397     }
2398 
2399     VkDescriptorPoolCreateInfo descriptorPoolInfo = {};
2400     descriptorPoolInfo.sType                      = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
2401     descriptorPoolInfo.flags                      = 0;
2402     descriptorPoolInfo.maxSets                    = maxSets;
2403     descriptorPoolInfo.poolSizeCount              = static_cast<uint32_t>(poolSizes.size());
2404     descriptorPoolInfo.pPoolSizes                 = poolSizes.data();
2405 
2406     mFreeDescriptorSets = maxSets;
2407 
2408     ANGLE_VK_TRY(contextVk, mDescriptorPool.init(contextVk->getDevice(), descriptorPoolInfo));
2409 
2410     return angle::Result::Continue;
2411 }
2412 
// Destroys the Vulkan descriptor pool immediately.
void DescriptorPoolHelper::destroy(VkDevice device)
{
    mDescriptorPool.destroy(device);
}
2417 
// Hands the descriptor pool to the context's garbage list for deferred destruction.
void DescriptorPoolHelper::release(ContextVk *contextVk)
{
    contextVk->addGarbage(&mDescriptorPool);
}
2422 
allocateSets(ContextVk * contextVk,const VkDescriptorSetLayout * descriptorSetLayout,uint32_t descriptorSetCount,VkDescriptorSet * descriptorSetsOut)2423 angle::Result DescriptorPoolHelper::allocateSets(ContextVk *contextVk,
2424                                                  const VkDescriptorSetLayout *descriptorSetLayout,
2425                                                  uint32_t descriptorSetCount,
2426                                                  VkDescriptorSet *descriptorSetsOut)
2427 {
2428     VkDescriptorSetAllocateInfo allocInfo = {};
2429     allocInfo.sType                       = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
2430     allocInfo.descriptorPool              = mDescriptorPool.getHandle();
2431     allocInfo.descriptorSetCount          = descriptorSetCount;
2432     allocInfo.pSetLayouts                 = descriptorSetLayout;
2433 
2434     ASSERT(mFreeDescriptorSets >= descriptorSetCount);
2435     mFreeDescriptorSets -= descriptorSetCount;
2436 
2437     ANGLE_VK_TRY(contextVk, mDescriptorPool.allocateDescriptorSets(contextVk->getDevice(),
2438                                                                    allocInfo, descriptorSetsOut));
2439 
2440     // The pool is still in use every time a new descriptor set is allocated from it.
2441     retain(&contextVk->getResourceUseList());
2442 
2443     return angle::Result::Continue;
2444 }
2445 
2446 // DynamicDescriptorPool implementation.
// Starts with no pools; init() creates the first one and caches the layout.
DynamicDescriptorPool::DynamicDescriptorPool()
    : mCurrentPoolIndex(0), mCachedDescriptorSetLayout(VK_NULL_HANDLE)
{}
2450 
2451 DynamicDescriptorPool::~DynamicDescriptorPool() = default;
2452 
init(ContextVk * contextVk,const VkDescriptorPoolSize * setSizes,size_t setSizeCount,VkDescriptorSetLayout descriptorSetLayout)2453 angle::Result DynamicDescriptorPool::init(ContextVk *contextVk,
2454                                           const VkDescriptorPoolSize *setSizes,
2455                                           size_t setSizeCount,
2456                                           VkDescriptorSetLayout descriptorSetLayout)
2457 {
2458     ASSERT(setSizes);
2459     ASSERT(setSizeCount);
2460     ASSERT(mCurrentPoolIndex == 0);
2461     ASSERT(mDescriptorPools.empty() ||
2462            (mDescriptorPools.size() == 1 &&
2463             mDescriptorPools[mCurrentPoolIndex]->get().hasCapacity(mMaxSetsPerPool)));
2464     ASSERT(mCachedDescriptorSetLayout == VK_NULL_HANDLE);
2465 
2466     mPoolSizes.assign(setSizes, setSizes + setSizeCount);
2467     mCachedDescriptorSetLayout = descriptorSetLayout;
2468 
2469     mDescriptorPools.push_back(new RefCountedDescriptorPoolHelper());
2470     mCurrentPoolIndex = mDescriptorPools.size() - 1;
2471     return mDescriptorPools[mCurrentPoolIndex]->get().init(contextVk, mPoolSizes, mMaxSetsPerPool);
2472 }
2473 
destroy(VkDevice device)2474 void DynamicDescriptorPool::destroy(VkDevice device)
2475 {
2476     for (RefCountedDescriptorPoolHelper *pool : mDescriptorPools)
2477     {
2478         ASSERT(!pool->isReferenced());
2479         pool->get().destroy(device);
2480         delete pool;
2481     }
2482 
2483     mDescriptorPools.clear();
2484     mCurrentPoolIndex          = 0;
2485     mCachedDescriptorSetLayout = VK_NULL_HANDLE;
2486 }
2487 
release(ContextVk * contextVk)2488 void DynamicDescriptorPool::release(ContextVk *contextVk)
2489 {
2490     for (RefCountedDescriptorPoolHelper *pool : mDescriptorPools)
2491     {
2492         ASSERT(!pool->isReferenced());
2493         pool->get().release(contextVk);
2494         delete pool;
2495     }
2496 
2497     mDescriptorPools.clear();
2498     mCurrentPoolIndex          = 0;
2499     mCachedDescriptorSetLayout = VK_NULL_HANDLE;
2500 }
2501 
// Allocates |descriptorSetCount| sets and binds the caller to the pool that served them.  A
// new pool is allocated when neither the caller's current binding nor the current pool has
// capacity; *newPoolAllocatedOut then tells the caller its cached sets are stale.
angle::Result DynamicDescriptorPool::allocateSetsAndGetInfo(
    ContextVk *contextVk,
    const VkDescriptorSetLayout *descriptorSetLayout,
    uint32_t descriptorSetCount,
    RefCountedDescriptorPoolBinding *bindingOut,
    VkDescriptorSet *descriptorSetsOut,
    bool *newPoolAllocatedOut)
{
    ASSERT(!mDescriptorPools.empty());
    // Every set allocated here must use the layout this dynamic pool was initialized with.
    ASSERT(*descriptorSetLayout == mCachedDescriptorSetLayout);

    *newPoolAllocatedOut = false;

    if (!bindingOut->valid() || !bindingOut->get().hasCapacity(descriptorSetCount))
    {
        if (!mDescriptorPools[mCurrentPoolIndex]->get().hasCapacity(descriptorSetCount))
        {
            ANGLE_TRY(allocateNewPool(contextVk));
            *newPoolAllocatedOut = true;
        }

        bindingOut->set(mDescriptorPools[mCurrentPoolIndex]);
    }

    return bindingOut->get().allocateSets(contextVk, descriptorSetLayout, descriptorSetCount,
                                          descriptorSetsOut);
}
2529 
// Makes a pool with free capacity current: recycles an unreferenced, GPU-idle pool if one
// exists, otherwise appends a new one.  The per-pool set count is also grown (up to a cap) so
// heavily used pools need fewer reallocations over time.
angle::Result DynamicDescriptorPool::allocateNewPool(ContextVk *contextVk)
{
    bool found = false;

    Serial lastCompletedSerial = contextVk->getLastCompletedQueueSerial();
    for (size_t poolIndex = 0; poolIndex < mDescriptorPools.size(); ++poolIndex)
    {
        if (!mDescriptorPools[poolIndex]->isReferenced() &&
            !mDescriptorPools[poolIndex]->get().isCurrentlyInUse(lastCompletedSerial))
        {
            mCurrentPoolIndex = poolIndex;
            found             = true;
            break;
        }
    }

    if (!found)
    {
        mDescriptorPools.push_back(new RefCountedDescriptorPoolHelper());
        mCurrentPoolIndex = mDescriptorPools.size() - 1;

        // Sanity cap to catch runaway pool growth.
        static constexpr size_t kMaxPools = 99999;
        ANGLE_VK_CHECK(contextVk, mDescriptorPools.size() < kMaxPools, VK_ERROR_TOO_MANY_OBJECTS);
    }

    // This pool is getting hot, so grow its max size to try and prevent allocating another pool in
    // the future.
    if (mMaxSetsPerPool < kMaxSetsPerPoolMax)
    {
        mMaxSetsPerPool *= mMaxSetsPerPoolMultiplier;
    }

    return mDescriptorPools[mCurrentPoolIndex]->get().init(contextVk, mPoolSizes, mMaxSetsPerPool);
}
2564 
// For testing only!
// Exposes the current per-pool set capacity.
uint32_t DynamicDescriptorPool::GetMaxSetsPerPoolForTesting()
{
    return mMaxSetsPerPool;
}
2570 
// For testing only!
// Overrides the per-pool set capacity used for subsequently created pools.
void DynamicDescriptorPool::SetMaxSetsPerPoolForTesting(uint32_t maxSetsPerPool)
{
    mMaxSetsPerPool = maxSetsPerPool;
}
2576 
// For testing only!
// Exposes the growth multiplier applied in allocateNewPool().
uint32_t DynamicDescriptorPool::GetMaxSetsPerPoolMultiplierForTesting()
{
    return mMaxSetsPerPoolMultiplier;
}
2582 
// For testing only!
// Overrides the growth multiplier applied in allocateNewPool().
void DynamicDescriptorPool::SetMaxSetsPerPoolMultiplierForTesting(uint32_t maxSetsPerPoolMultiplier)
{
    mMaxSetsPerPoolMultiplier = maxSetsPerPoolMultiplier;
}
2588 
2589 // DynamicallyGrowingPool implementation
// Starts with no pools; initEntryPool() sets the per-pool entry count.
template <typename Pool>
DynamicallyGrowingPool<Pool>::DynamicallyGrowingPool()
    : mPoolSize(0), mCurrentPool(0), mCurrentFreeEntry(0)
{}
2594 
// Pool cleanup is the derived class's responsibility (via destroyEntryPool()).
template <typename Pool>
DynamicallyGrowingPool<Pool>::~DynamicallyGrowingPool() = default;
2597 
// Records the fixed number of entries each pool holds; actual pools are added later through
// allocateNewEntryPool().
template <typename Pool>
angle::Result DynamicallyGrowingPool<Pool>::initEntryPool(Context *contextVk, uint32_t poolSize)
{
    ASSERT(mPools.empty() && mPoolStats.empty());
    mPoolSize = poolSize;
    return angle::Result::Continue;
}
2605 
// Drops all pools and their statistics; the derived class destroys the Vulkan objects first.
template <typename Pool>
void DynamicallyGrowingPool<Pool>::destroyEntryPool()
{
    mPools.clear();
    mPoolStats.clear();
}
2612 
2613 template <typename Pool>
findFreeEntryPool(ContextVk * contextVk)2614 bool DynamicallyGrowingPool<Pool>::findFreeEntryPool(ContextVk *contextVk)
2615 {
2616     Serial lastCompletedQueueSerial = contextVk->getLastCompletedQueueSerial();
2617     for (size_t i = 0; i < mPools.size(); ++i)
2618     {
2619         if (mPoolStats[i].freedCount == mPoolSize &&
2620             mPoolStats[i].serial <= lastCompletedQueueSerial)
2621         {
2622             mCurrentPool      = i;
2623             mCurrentFreeEntry = 0;
2624 
2625             mPoolStats[i].freedCount = 0;
2626 
2627             return true;
2628         }
2629     }
2630 
2631     return false;
2632 }
2633 
2634 template <typename Pool>
allocateNewEntryPool(ContextVk * contextVk,Pool && pool)2635 angle::Result DynamicallyGrowingPool<Pool>::allocateNewEntryPool(ContextVk *contextVk, Pool &&pool)
2636 {
2637     mPools.push_back(std::move(pool));
2638 
2639     PoolStats poolStats = {0, Serial()};
2640     mPoolStats.push_back(poolStats);
2641 
2642     mCurrentPool      = mPools.size() - 1;
2643     mCurrentFreeEntry = 0;
2644 
2645     return angle::Result::Continue;
2646 }
2647 
// Bumps the freed count for |poolIndex| and stamps the current queue serial so the pool is
// only recycled after the GPU is done with it.
template <typename Pool>
void DynamicallyGrowingPool<Pool>::onEntryFreed(ContextVk *contextVk, size_t poolIndex)
{
    ASSERT(poolIndex < mPoolStats.size() && mPoolStats[poolIndex].freedCount < mPoolSize);

    // Take note of the current serial to avoid reallocating a query in the same pool
    mPoolStats[poolIndex].serial = contextVk->getCurrentQueueSerial();
    ++mPoolStats[poolIndex].freedCount;
}
2657 
// DynamicQueryPool implementation
// Construction/destruction need no work beyond the DynamicallyGrowingPool defaults.
DynamicQueryPool::DynamicQueryPool() = default;

DynamicQueryPool::~DynamicQueryPool() = default;
2662 
init(ContextVk * contextVk,VkQueryType type,uint32_t poolSize)2663 angle::Result DynamicQueryPool::init(ContextVk *contextVk, VkQueryType type, uint32_t poolSize)
2664 {
2665     ANGLE_TRY(initEntryPool(contextVk, poolSize));
2666 
2667     mQueryType = type;
2668     ANGLE_TRY(allocateNewPool(contextVk));
2669 
2670     return angle::Result::Continue;
2671 }
2672 
destroy(VkDevice device)2673 void DynamicQueryPool::destroy(VkDevice device)
2674 {
2675     for (QueryPool &queryPool : mPools)
2676     {
2677         queryPool.destroy(device);
2678     }
2679 
2680     destroyEntryPool();
2681 }
2682 
allocateQuery(ContextVk * contextVk,QueryHelper * queryOut,uint32_t queryCount)2683 angle::Result DynamicQueryPool::allocateQuery(ContextVk *contextVk,
2684                                               QueryHelper *queryOut,
2685                                               uint32_t queryCount)
2686 {
2687     ASSERT(!queryOut->valid());
2688 
2689     if (mCurrentFreeEntry + queryCount > mPoolSize)
2690     {
2691         // No more queries left in this pool, create another one.
2692         ANGLE_TRY(allocateNewPool(contextVk));
2693     }
2694 
2695     uint32_t queryIndex = mCurrentFreeEntry;
2696     mCurrentFreeEntry += queryCount;
2697 
2698     queryOut->init(this, mCurrentPool, queryIndex, queryCount);
2699 
2700     return angle::Result::Continue;
2701 }
2702 
freeQuery(ContextVk * contextVk,QueryHelper * query)2703 void DynamicQueryPool::freeQuery(ContextVk *contextVk, QueryHelper *query)
2704 {
2705     if (query->valid())
2706     {
2707         size_t poolIndex = query->mQueryPoolIndex;
2708         ASSERT(getQueryPool(poolIndex).valid());
2709 
2710         onEntryFreed(contextVk, poolIndex);
2711 
2712         query->deinit();
2713     }
2714 }
2715 
allocateNewPool(ContextVk * contextVk)2716 angle::Result DynamicQueryPool::allocateNewPool(ContextVk *contextVk)
2717 {
2718     if (findFreeEntryPool(contextVk))
2719     {
2720         return angle::Result::Continue;
2721     }
2722 
2723     VkQueryPoolCreateInfo queryPoolInfo = {};
2724     queryPoolInfo.sType                 = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
2725     queryPoolInfo.flags                 = 0;
2726     queryPoolInfo.queryType             = mQueryType;
2727     queryPoolInfo.queryCount            = mPoolSize;
2728     queryPoolInfo.pipelineStatistics    = 0;
2729 
2730     if (mQueryType == VK_QUERY_TYPE_PIPELINE_STATISTICS)
2731     {
2732         queryPoolInfo.pipelineStatistics = VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT;
2733     }
2734 
2735     QueryPool queryPool;
2736 
2737     ANGLE_VK_TRY(contextVk, queryPool.init(contextVk->getDevice(), queryPoolInfo));
2738 
2739     return allocateNewEntryPool(contextVk, std::move(queryPool));
2740 }
2741 
2742 // QueryResult implementation
setResults(uint64_t * results,uint32_t queryCount)2743 void QueryResult::setResults(uint64_t *results, uint32_t queryCount)
2744 {
2745     ASSERT(mResults[0] == 0 && mResults[1] == 0);
2746 
2747     // Accumulate the query results.  For multiview, where multiple query indices are used to return
2748     // the results, it's undefined how the results are distributed between indices, but the sum is
2749     // guaranteed to be the desired result.
2750     for (uint32_t query = 0; query < queryCount; ++query)
2751     {
2752         for (uint32_t perQueryIndex = 0; perQueryIndex < mIntsPerResult; ++perQueryIndex)
2753         {
2754             mResults[perQueryIndex] += results[query * mIntsPerResult + perQueryIndex];
2755         }
2756     }
2757 }
2758 
2759 // QueryHelper implementation
// Starts invalid (no pool); init() assigns a pool and slot range.
QueryHelper::QueryHelper()
    : mDynamicQueryPool(nullptr), mQueryPoolIndex(0), mQuery(0), mQueryCount(0)
{}
2763 
~QueryHelper()2764 QueryHelper::~QueryHelper() {}
2765 
2766 // Move constructor
// Move constructor: steals rhs's pool reference and slot range, leaving rhs invalid.
QueryHelper::QueryHelper(QueryHelper &&rhs)
    : Resource(std::move(rhs)),
      mDynamicQueryPool(rhs.mDynamicQueryPool),
      mQueryPoolIndex(rhs.mQueryPoolIndex),
      mQuery(rhs.mQuery),
      mQueryCount(rhs.mQueryCount)
{
    rhs.mDynamicQueryPool = nullptr;
    rhs.mQueryPoolIndex   = 0;
    rhs.mQuery            = 0;
    rhs.mQueryCount       = 0;
}
2779 
// Swap-based move assignment: exchanges pool/slot state with rhs.
// NOTE(review): unlike the move constructor, the Resource base (use tracking) is not swapped
// here — confirm this is intended.
QueryHelper &QueryHelper::operator=(QueryHelper &&rhs)
{
    std::swap(mDynamicQueryPool, rhs.mDynamicQueryPool);
    std::swap(mQueryPoolIndex, rhs.mQueryPoolIndex);
    std::swap(mQuery, rhs.mQuery);
    std::swap(mQueryCount, rhs.mQueryCount);
    return *this;
}
2788 
// Records which pool and slot range [query, query + queryCount) this helper owns.
void QueryHelper::init(const DynamicQueryPool *dynamicQueryPool,
                       const size_t queryPoolIndex,
                       uint32_t query,
                       uint32_t queryCount)
{
    mDynamicQueryPool = dynamicQueryPool;
    mQueryPoolIndex   = queryPoolIndex;
    mQuery            = query;
    mQueryCount       = queryCount;

    // Multiview uses one query index per view, so the count is bounded by the max view count.
    ASSERT(mQueryCount <= gl::IMPLEMENTATION_ANGLE_MULTIVIEW_MAX_VIEWS);
}
2801 
// Returns the helper to its invalid state and resets its GPU-use tracking.
void QueryHelper::deinit()
{
    mDynamicQueryPool = nullptr;
    mQueryPoolIndex   = 0;
    mQuery            = 0;
    mQueryCount       = 0;
    // Release the old use tracking, then start a fresh one for potential reuse.
    mUse.release();
    mUse.init();
}
2811 
beginQueryImpl(ContextVk * contextVk,CommandBuffer * resetCommandBuffer,CommandBuffer * commandBuffer)2812 void QueryHelper::beginQueryImpl(ContextVk *contextVk,
2813                                  CommandBuffer *resetCommandBuffer,
2814                                  CommandBuffer *commandBuffer)
2815 {
2816     const QueryPool &queryPool = getQueryPool();
2817     resetCommandBuffer->resetQueryPool(queryPool, mQuery, mQueryCount);
2818     commandBuffer->beginQuery(queryPool, mQuery, 0);
2819 }
2820 
// Ends the query on the first slot index.
void QueryHelper::endQueryImpl(ContextVk *contextVk, CommandBuffer *commandBuffer)
{
    commandBuffer->endQuery(getQueryPool(), mQuery);

    // Query results are available after endQuery, retain this query so that we get its serial
    // updated which is used to indicate that query results are (or will be) available.
    retain(&contextVk->getResourceUseList());
}
2829 
beginQuery(ContextVk * contextVk)2830 angle::Result QueryHelper::beginQuery(ContextVk *contextVk)
2831 {
2832     if (contextVk->hasStartedRenderPass())
2833     {
2834         ANGLE_TRY(contextVk->flushCommandsAndEndRenderPass());
2835     }
2836 
2837     CommandBuffer *commandBuffer;
2838     ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer({}, &commandBuffer));
2839 
2840     ANGLE_TRY(contextVk->handleGraphicsEventLog(rx::GraphicsEventCmdBuf::InOutsideCmdBufQueryCmd));
2841 
2842     beginQueryImpl(contextVk, commandBuffer, commandBuffer);
2843 
2844     return angle::Result::Continue;
2845 }
2846 
endQuery(ContextVk * contextVk)2847 angle::Result QueryHelper::endQuery(ContextVk *contextVk)
2848 {
2849     if (contextVk->hasStartedRenderPass())
2850     {
2851         ANGLE_TRY(contextVk->flushCommandsAndEndRenderPass());
2852     }
2853 
2854     CommandBuffer *commandBuffer;
2855     ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer({}, &commandBuffer));
2856 
2857     ANGLE_TRY(contextVk->handleGraphicsEventLog(rx::GraphicsEventCmdBuf::InOutsideCmdBufQueryCmd));
2858 
2859     endQueryImpl(contextVk, commandBuffer);
2860 
2861     return angle::Result::Continue;
2862 }
2863 
beginRenderPassQuery(ContextVk * contextVk)2864 angle::Result QueryHelper::beginRenderPassQuery(ContextVk *contextVk)
2865 {
2866     CommandBuffer *outsideRenderPassCommandBuffer;
2867     ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer({}, &outsideRenderPassCommandBuffer));
2868 
2869     CommandBuffer *renderPassCommandBuffer =
2870         &contextVk->getStartedRenderPassCommands().getCommandBuffer();
2871 
2872     beginQueryImpl(contextVk, outsideRenderPassCommandBuffer, renderPassCommandBuffer);
2873 
2874     return angle::Result::Continue;
2875 }
2876 
endRenderPassQuery(ContextVk * contextVk)2877 void QueryHelper::endRenderPassQuery(ContextVk *contextVk)
2878 {
2879     endQueryImpl(contextVk, &contextVk->getStartedRenderPassCommands().getCommandBuffer());
2880 }
2881 
flushAndWriteTimestamp(ContextVk * contextVk)2882 angle::Result QueryHelper::flushAndWriteTimestamp(ContextVk *contextVk)
2883 {
2884     if (contextVk->hasStartedRenderPass())
2885     {
2886         ANGLE_TRY(contextVk->flushCommandsAndEndRenderPass());
2887     }
2888 
2889     CommandBuffer *commandBuffer;
2890     ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer({}, &commandBuffer));
2891     writeTimestamp(contextVk, commandBuffer);
2892     return angle::Result::Continue;
2893 }
2894 
writeTimestampToPrimary(ContextVk * contextVk,PrimaryCommandBuffer * primary)2895 void QueryHelper::writeTimestampToPrimary(ContextVk *contextVk, PrimaryCommandBuffer *primary)
2896 {
2897     // Note that commands may not be flushed at this point.
2898 
2899     const QueryPool &queryPool = getQueryPool();
2900     primary->resetQueryPool(queryPool, mQuery, mQueryCount);
2901     primary->writeTimestamp(VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, queryPool, mQuery);
2902 }
2903 
writeTimestamp(ContextVk * contextVk,CommandBuffer * commandBuffer)2904 void QueryHelper::writeTimestamp(ContextVk *contextVk, CommandBuffer *commandBuffer)
2905 {
2906     const QueryPool &queryPool = getQueryPool();
2907     commandBuffer->resetQueryPool(queryPool, mQuery, mQueryCount);
2908     commandBuffer->writeTimestamp(VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, queryPool, mQuery);
2909     // timestamp results are available immediately, retain this query so that we get its serial
2910     // updated which is used to indicate that query results are (or will be) available.
2911     retain(&contextVk->getResourceUseList());
2912 }
2913 
// Returns whether commands using this query have been recorded (the use has a valid serial).
// Used to avoid waiting on a query that was never written, which would hang (see
// getUint64ResultNonBlocking).
bool QueryHelper::hasSubmittedCommands() const
{
    return mUse.getSerial().valid();
}
2918 
getUint64ResultNonBlocking(ContextVk * contextVk,QueryResult * resultOut,bool * availableOut)2919 angle::Result QueryHelper::getUint64ResultNonBlocking(ContextVk *contextVk,
2920                                                       QueryResult *resultOut,
2921                                                       bool *availableOut)
2922 {
2923     ASSERT(valid());
2924     VkResult result;
2925 
2926     // Ensure that we only wait if we have inserted a query in command buffer. Otherwise you will
2927     // wait forever and trigger GPU timeout.
2928     if (hasSubmittedCommands())
2929     {
2930         constexpr VkQueryResultFlags kFlags = VK_QUERY_RESULT_64_BIT;
2931         result                              = getResultImpl(contextVk, kFlags, resultOut);
2932     }
2933     else
2934     {
2935         result     = VK_SUCCESS;
2936         *resultOut = 0;
2937     }
2938 
2939     if (result == VK_NOT_READY)
2940     {
2941         *availableOut = false;
2942         return angle::Result::Continue;
2943     }
2944     else
2945     {
2946         ANGLE_VK_TRY(contextVk, result);
2947         *availableOut = true;
2948     }
2949     return angle::Result::Continue;
2950 }
2951 
getUint64Result(ContextVk * contextVk,QueryResult * resultOut)2952 angle::Result QueryHelper::getUint64Result(ContextVk *contextVk, QueryResult *resultOut)
2953 {
2954     ASSERT(valid());
2955     if (hasSubmittedCommands())
2956     {
2957         constexpr VkQueryResultFlags kFlags = VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT;
2958         ANGLE_VK_TRY(contextVk, getResultImpl(contextVk, kFlags, resultOut));
2959     }
2960     else
2961     {
2962         *resultOut = 0;
2963     }
2964     return angle::Result::Continue;
2965 }
2966 
getResultImpl(ContextVk * contextVk,const VkQueryResultFlags flags,QueryResult * resultOut)2967 VkResult QueryHelper::getResultImpl(ContextVk *contextVk,
2968                                     const VkQueryResultFlags flags,
2969                                     QueryResult *resultOut)
2970 {
2971     std::array<uint64_t, 2 * gl::IMPLEMENTATION_ANGLE_MULTIVIEW_MAX_VIEWS> results;
2972 
2973     VkDevice device = contextVk->getDevice();
2974     VkResult result = getQueryPool().getResults(device, mQuery, mQueryCount, sizeof(results),
2975                                                 results.data(), sizeof(uint64_t), flags);
2976 
2977     if (result == VK_SUCCESS)
2978     {
2979         resultOut->setResults(results.data(), mQueryCount);
2980     }
2981 
2982     return result;
2983 }
2984 
// DynamicSemaphorePool implementation
// Construction/destruction are trivial: semaphores are created in init()/allocateNewPool() and
// freed explicitly via destroy().
DynamicSemaphorePool::DynamicSemaphorePool() = default;

DynamicSemaphorePool::~DynamicSemaphorePool() = default;
2989 
init(ContextVk * contextVk,uint32_t poolSize)2990 angle::Result DynamicSemaphorePool::init(ContextVk *contextVk, uint32_t poolSize)
2991 {
2992     ANGLE_TRY(initEntryPool(contextVk, poolSize));
2993     ANGLE_TRY(allocateNewPool(contextVk));
2994     return angle::Result::Continue;
2995 }
2996 
destroy(VkDevice device)2997 void DynamicSemaphorePool::destroy(VkDevice device)
2998 {
2999     for (auto &semaphorePool : mPools)
3000     {
3001         for (Semaphore &semaphore : semaphorePool)
3002         {
3003             semaphore.destroy(device);
3004         }
3005     }
3006 
3007     destroyEntryPool();
3008 }
3009 
allocateSemaphore(ContextVk * contextVk,SemaphoreHelper * semaphoreOut)3010 angle::Result DynamicSemaphorePool::allocateSemaphore(ContextVk *contextVk,
3011                                                       SemaphoreHelper *semaphoreOut)
3012 {
3013     ASSERT(!semaphoreOut->getSemaphore());
3014 
3015     if (mCurrentFreeEntry >= mPoolSize)
3016     {
3017         // No more queries left in this pool, create another one.
3018         ANGLE_TRY(allocateNewPool(contextVk));
3019     }
3020 
3021     semaphoreOut->init(mCurrentPool, &mPools[mCurrentPool][mCurrentFreeEntry++]);
3022 
3023     return angle::Result::Continue;
3024 }
3025 
freeSemaphore(ContextVk * contextVk,SemaphoreHelper * semaphore)3026 void DynamicSemaphorePool::freeSemaphore(ContextVk *contextVk, SemaphoreHelper *semaphore)
3027 {
3028     if (semaphore->getSemaphore())
3029     {
3030         onEntryFreed(contextVk, semaphore->getSemaphorePoolIndex());
3031         semaphore->deinit();
3032     }
3033 }
3034 
// Creates a new pool of semaphores, unless a previously-retired pool can be reused.
angle::Result DynamicSemaphorePool::allocateNewPool(ContextVk *contextVk)
{
    // Reuse a free entry pool if the base class has one available.
    if (findFreeEntryPool(contextVk))
    {
        return angle::Result::Continue;
    }

    std::vector<Semaphore> newPool(mPoolSize);

    for (Semaphore &semaphore : newPool)
    {
        ANGLE_VK_TRY(contextVk, semaphore.init(contextVk->getDevice()));
    }

    // This code is safe as long as the growth of the outer vector in vector<vector<T>> is done by
    // moving the inner vectors, making sure references to the inner vector remain intact.
    // Capture the first inner vector's data pointer before growth to verify that below.
    Semaphore *assertMove = mPools.size() > 0 ? mPools[0].data() : nullptr;

    ANGLE_TRY(allocateNewEntryPool(contextVk, std::move(newPool)));

    ASSERT(assertMove == nullptr || assertMove == mPools[0].data());

    return angle::Result::Continue;
}
3059 
3060 // SemaphoreHelper implementation
SemaphoreHelper()3061 SemaphoreHelper::SemaphoreHelper() : mSemaphorePoolIndex(0), mSemaphore(0) {}
3062 
~SemaphoreHelper()3063 SemaphoreHelper::~SemaphoreHelper() {}
3064 
// Move constructor: takes over |other|'s pool index and (non-owning) semaphore pointer, and
// clears |other|'s pointer.  Note |other|'s pool index is left as-is; callers test validity via
// getSemaphore() being non-null (see DynamicSemaphorePool::freeSemaphore).
SemaphoreHelper::SemaphoreHelper(SemaphoreHelper &&other)
    : mSemaphorePoolIndex(other.mSemaphorePoolIndex), mSemaphore(other.mSemaphore)
{
    other.mSemaphore = nullptr;
}
3070 
// Swap-based move assignment.  mSemaphore is a non-owning pointer into a DynamicSemaphorePool,
// so no destruction of the displaced value is needed.
SemaphoreHelper &SemaphoreHelper::operator=(SemaphoreHelper &&other)
{
    std::swap(mSemaphorePoolIndex, other.mSemaphorePoolIndex);
    std::swap(mSemaphore, other.mSemaphore);
    return *this;
}
3077 
init(const size_t semaphorePoolIndex,const Semaphore * semaphore)3078 void SemaphoreHelper::init(const size_t semaphorePoolIndex, const Semaphore *semaphore)
3079 {
3080     mSemaphorePoolIndex = semaphorePoolIndex;
3081     mSemaphore          = semaphore;
3082 }
3083 
deinit()3084 void SemaphoreHelper::deinit()
3085 {
3086     mSemaphorePoolIndex = 0;
3087     mSemaphore          = nullptr;
3088 }
3089 
// LineLoopHelper implementation.
LineLoopHelper::LineLoopHelper(RendererVk *renderer)
{
    // We need to use an alignment of the maximum size we're going to allocate, which is
    // VK_INDEX_TYPE_UINT32. When we switch from a drawElement to a drawArray call, the allocations
    // can vary in size. According to the Vulkan spec, when calling vkCmdBindIndexBuffer: 'The
    // sum of offset and the address of the range of VkDeviceMemory object that is backing buffer,
    // must be a multiple of the type indicated by indexType'.
    mDynamicIndexBuffer.init(renderer, kLineLoopDynamicBufferUsage, sizeof(uint32_t),
                             kLineLoopDynamicBufferInitialSize, true,
                             DynamicBufferPolicy::OneShotUse);
    // The indirect-draw parameter buffer uses the same uint32 alignment.
    mDynamicIndirectBuffer.init(renderer, kLineLoopDynamicIndirectBufferUsage, sizeof(uint32_t),
                                kLineLoopDynamicIndirectBufferInitialSize, true,
                                DynamicBufferPolicy::OneShotUse);
}
3105 
// Trivial destructor; the dynamic buffers are cleaned up via release()/destroy() below.
LineLoopHelper::~LineLoopHelper() = default;
3107 
getIndexBufferForDrawArrays(ContextVk * contextVk,uint32_t clampedVertexCount,GLint firstVertex,BufferHelper ** bufferOut,VkDeviceSize * offsetOut)3108 angle::Result LineLoopHelper::getIndexBufferForDrawArrays(ContextVk *contextVk,
3109                                                           uint32_t clampedVertexCount,
3110                                                           GLint firstVertex,
3111                                                           BufferHelper **bufferOut,
3112                                                           VkDeviceSize *offsetOut)
3113 {
3114     uint32_t *indices    = nullptr;
3115     size_t allocateBytes = sizeof(uint32_t) * (static_cast<size_t>(clampedVertexCount) + 1);
3116 
3117     mDynamicIndexBuffer.releaseInFlightBuffers(contextVk);
3118     ANGLE_TRY(mDynamicIndexBuffer.allocate(contextVk, allocateBytes,
3119                                            reinterpret_cast<uint8_t **>(&indices), nullptr,
3120                                            offsetOut, nullptr));
3121     *bufferOut = mDynamicIndexBuffer.getCurrentBuffer();
3122 
3123     // Note: there could be an overflow in this addition.
3124     uint32_t unsignedFirstVertex = static_cast<uint32_t>(firstVertex);
3125     uint32_t vertexCount         = (clampedVertexCount + unsignedFirstVertex);
3126     for (uint32_t vertexIndex = unsignedFirstVertex; vertexIndex < vertexCount; vertexIndex++)
3127     {
3128         *indices++ = vertexIndex;
3129     }
3130     *indices = unsignedFirstVertex;
3131 
3132     // Since we are not using the VK_MEMORY_PROPERTY_HOST_COHERENT_BIT flag when creating the
3133     // device memory in the StreamingBuffer, we always need to make sure we flush it after
3134     // writing.
3135     ANGLE_TRY(mDynamicIndexBuffer.flush(contextVk));
3136 
3137     return angle::Result::Continue;
3138 }
3139 
// Produces a line-loop index buffer from an element array buffer.  Two paths:
//  - CPU path (GL_UNSIGNED_BYTE indices or primitive restart): map the source and stream
//    converted indices through streamIndices().
//  - GPU path: copy the source indices with vkCmdCopyBuffer and append the first index again to
//    close the loop.
angle::Result LineLoopHelper::getIndexBufferForElementArrayBuffer(ContextVk *contextVk,
                                                                  BufferVk *elementArrayBufferVk,
                                                                  gl::DrawElementsType glIndexType,
                                                                  int indexCount,
                                                                  intptr_t elementArrayOffset,
                                                                  BufferHelper **bufferOut,
                                                                  VkDeviceSize *bufferOffsetOut,
                                                                  uint32_t *indexCountOut)
{
    if (glIndexType == gl::DrawElementsType::UnsignedByte ||
        contextVk->getState().isPrimitiveRestartEnabled())
    {
        ANGLE_TRACE_EVENT0("gpu.angle", "LineLoopHelper::getIndexBufferForElementArrayBuffer");

        void *srcDataMapping = nullptr;
        ANGLE_TRY(elementArrayBufferVk->mapImpl(contextVk, &srcDataMapping));
        ANGLE_TRY(streamIndices(contextVk, glIndexType, indexCount,
                                static_cast<const uint8_t *>(srcDataMapping) + elementArrayOffset,
                                bufferOut, bufferOffsetOut, indexCountOut));
        ANGLE_TRY(elementArrayBufferVk->unmapImpl(contextVk));
        return angle::Result::Continue;
    }

    // GPU path: output is the source plus one repeated index to close the loop.
    *indexCountOut = indexCount + 1;

    uint32_t *indices    = nullptr;
    size_t unitSize      = contextVk->getVkIndexTypeSize(glIndexType);
    // +1 extra byte leaves room for the extraCopyBufferRegion workaround copy below.
    size_t allocateBytes = unitSize * (indexCount + 1) + 1;

    mDynamicIndexBuffer.releaseInFlightBuffers(contextVk);
    ANGLE_TRY(mDynamicIndexBuffer.allocate(contextVk, allocateBytes,
                                           reinterpret_cast<uint8_t **>(&indices), nullptr,
                                           bufferOffsetOut, nullptr));
    *bufferOut = mDynamicIndexBuffer.getCurrentBuffer();

    VkDeviceSize sourceBufferOffset = 0;
    BufferHelper *sourceBuffer = &elementArrayBufferVk->getBufferAndOffset(&sourceBufferOffset);

    VkDeviceSize sourceOffset = static_cast<VkDeviceSize>(elementArrayOffset) + sourceBufferOffset;
    uint64_t unitCount        = static_cast<VkDeviceSize>(indexCount);
    // Copy 1: the full source range.  Copy 2: the first index again, appended at the end.
    angle::FixedVector<VkBufferCopy, 3> copies = {
        {sourceOffset, *bufferOffsetOut, unitCount * unitSize},
        {sourceOffset, *bufferOffsetOut + unitCount * unitSize, unitSize},
    };
    // Driver workaround: add an extra one-byte copy region (see the extraCopyBufferRegion
    // feature in driver_utils).
    if (contextVk->getRenderer()->getFeatures().extraCopyBufferRegion.enabled)
        copies.push_back({sourceOffset, *bufferOffsetOut + (unitCount + 1) * unitSize, 1});

    vk::CommandBufferAccess access;
    access.onBufferTransferWrite(*bufferOut);
    access.onBufferTransferRead(sourceBuffer);

    vk::CommandBuffer *commandBuffer;
    ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(access, &commandBuffer));

    commandBuffer->copyBuffer(sourceBuffer->getBuffer(), (*bufferOut)->getBuffer(),
                              static_cast<uint32_t>(copies.size()), copies.data());

    ANGLE_TRY(mDynamicIndexBuffer.flush(contextVk));
    return angle::Result::Continue;
}
3200 
// Streams line-loop indices from CPU memory (|srcPtr|) into the dynamic index buffer, appending
// the first index at the end to close the loop.  Handles primitive restart and emulation of
// uint8 indices on implementations without native support.
angle::Result LineLoopHelper::streamIndices(ContextVk *contextVk,
                                            gl::DrawElementsType glIndexType,
                                            GLsizei indexCount,
                                            const uint8_t *srcPtr,
                                            BufferHelper **bufferOut,
                                            VkDeviceSize *bufferOffsetOut,
                                            uint32_t *indexCountOut)
{
    size_t unitSize = contextVk->getVkIndexTypeSize(glIndexType);

    uint8_t *indices = nullptr;

    // Without primitive restart, the output is the input plus one closing index.
    uint32_t numOutIndices = indexCount + 1;
    if (contextVk->getState().isPrimitiveRestartEnabled())
    {
        // With primitive restart, the output count depends on the source data.
        numOutIndices = GetLineLoopWithRestartIndexCount(glIndexType, indexCount, srcPtr);
    }
    *indexCountOut       = numOutIndices;
    size_t allocateBytes = unitSize * numOutIndices;
    ANGLE_TRY(mDynamicIndexBuffer.allocate(contextVk, allocateBytes,
                                           reinterpret_cast<uint8_t **>(&indices), nullptr,
                                           bufferOffsetOut, nullptr));
    *bufferOut = mDynamicIndexBuffer.getCurrentBuffer();

    if (contextVk->getState().isPrimitiveRestartEnabled())
    {
        HandlePrimitiveRestart(contextVk, glIndexType, indexCount, srcPtr, indices);
    }
    else
    {
        if (contextVk->shouldConvertUint8VkIndexType(glIndexType))
        {
            // If vulkan doesn't support uint8 index types, we need to emulate it.
            VkIndexType indexType = contextVk->getVkIndexType(glIndexType);
            ASSERT(indexType == VK_INDEX_TYPE_UINT16);
            // Widen each uint8 index to uint16.
            uint16_t *indicesDst = reinterpret_cast<uint16_t *>(indices);
            for (int i = 0; i < indexCount; i++)
            {
                indicesDst[i] = srcPtr[i];
            }

            // Repeat the first index to close the loop.
            indicesDst[indexCount] = srcPtr[0];
        }
        else
        {
            // Copy the source indices verbatim, then repeat the first index to close the loop.
            memcpy(indices, srcPtr, unitSize * indexCount);
            memcpy(indices + unitSize * indexCount, srcPtr, unitSize);
        }
    }

    // The dynamic buffer is not host coherent (see getIndexBufferForDrawArrays); flush after the
    // CPU writes.
    ANGLE_TRY(mDynamicIndexBuffer.flush(contextVk));
    return angle::Result::Continue;
}
3254 
// Converts an indirect indexed draw into a line-loop-compatible one: allocates new index and
// indirect buffers, then has UtilsVk rewrite both on the GPU (the index count is only known
// there, inside the source indirect parameters).
angle::Result LineLoopHelper::streamIndicesIndirect(ContextVk *contextVk,
                                                    gl::DrawElementsType glIndexType,
                                                    BufferHelper *indexBuffer,
                                                    VkDeviceSize indexBufferOffset,
                                                    BufferHelper *indirectBuffer,
                                                    VkDeviceSize indirectBufferOffset,
                                                    BufferHelper **indexBufferOut,
                                                    VkDeviceSize *indexBufferOffsetOut,
                                                    BufferHelper **indirectBufferOut,
                                                    VkDeviceSize *indirectBufferOffsetOut)
{
    size_t unitSize      = contextVk->getVkIndexTypeSize(glIndexType);
    size_t allocateBytes = static_cast<size_t>(indexBuffer->getSize() + unitSize);

    if (contextVk->getState().isPrimitiveRestartEnabled())
    {
        // If primitive restart, new index buffer is 135% the size of the original index buffer. The
        // smallest lineloop with primitive restart is 3 indices (point 1, point 2 and restart
        // value) when converted to linelist becomes 4 vertices. Expansion of 4/3. Any larger
        // lineloops would have less overhead and require less extra space. Any incomplete
        // primitives can be dropped or left incomplete and thus not increase the size of the
        // destination index buffer. Since we don't know the number of indices being used we'll use
        // the size of the index buffer as allocated as the index count.
        size_t numInputIndices    = static_cast<size_t>(indexBuffer->getSize() / unitSize);
        size_t numNewInputIndices = ((numInputIndices * 4) / 3) + 1;
        allocateBytes             = static_cast<size_t>(numNewInputIndices * unitSize);
    }

    mDynamicIndexBuffer.releaseInFlightBuffers(contextVk);
    mDynamicIndirectBuffer.releaseInFlightBuffers(contextVk);

    ANGLE_TRY(mDynamicIndexBuffer.allocate(contextVk, allocateBytes, nullptr, nullptr,
                                           indexBufferOffsetOut, nullptr));
    *indexBufferOut = mDynamicIndexBuffer.getCurrentBuffer();

    ANGLE_TRY(mDynamicIndirectBuffer.allocate(contextVk, sizeof(VkDrawIndexedIndirectCommand),
                                              nullptr, nullptr, indirectBufferOffsetOut, nullptr));
    *indirectBufferOut = mDynamicIndirectBuffer.getCurrentBuffer();

    BufferHelper *destIndexBuffer    = mDynamicIndexBuffer.getCurrentBuffer();
    BufferHelper *destIndirectBuffer = mDynamicIndirectBuffer.getCurrentBuffer();

    // Copy relevant section of the source into destination at allocated offset.  Note that the
    // offset returned by allocate() above is in bytes. As is the indices offset pointer.
    UtilsVk::ConvertLineLoopIndexIndirectParameters params = {};
    params.indirectBufferOffset    = static_cast<uint32_t>(indirectBufferOffset);
    params.dstIndirectBufferOffset = static_cast<uint32_t>(*indirectBufferOffsetOut);
    params.srcIndexBufferOffset    = static_cast<uint32_t>(indexBufferOffset);
    params.dstIndexBufferOffset    = static_cast<uint32_t>(*indexBufferOffsetOut);
    params.indicesBitsWidth        = static_cast<uint32_t>(unitSize * 8);

    ANGLE_TRY(contextVk->getUtils().convertLineLoopIndexIndirectBuffer(
        contextVk, indirectBuffer, destIndirectBuffer, destIndexBuffer, indexBuffer, params));

    return angle::Result::Continue;
}
3311 
streamArrayIndirect(ContextVk * contextVk,size_t vertexCount,BufferHelper * arrayIndirectBuffer,VkDeviceSize arrayIndirectBufferOffset,BufferHelper ** indexBufferOut,VkDeviceSize * indexBufferOffsetOut,BufferHelper ** indexIndirectBufferOut,VkDeviceSize * indexIndirectBufferOffsetOut)3312 angle::Result LineLoopHelper::streamArrayIndirect(ContextVk *contextVk,
3313                                                   size_t vertexCount,
3314                                                   BufferHelper *arrayIndirectBuffer,
3315                                                   VkDeviceSize arrayIndirectBufferOffset,
3316                                                   BufferHelper **indexBufferOut,
3317                                                   VkDeviceSize *indexBufferOffsetOut,
3318                                                   BufferHelper **indexIndirectBufferOut,
3319                                                   VkDeviceSize *indexIndirectBufferOffsetOut)
3320 {
3321     auto unitSize        = sizeof(uint32_t);
3322     size_t allocateBytes = static_cast<size_t>((vertexCount + 1) * unitSize);
3323 
3324     mDynamicIndexBuffer.releaseInFlightBuffers(contextVk);
3325     mDynamicIndirectBuffer.releaseInFlightBuffers(contextVk);
3326 
3327     ANGLE_TRY(mDynamicIndexBuffer.allocate(contextVk, allocateBytes, nullptr, nullptr,
3328                                            indexBufferOffsetOut, nullptr));
3329     *indexBufferOut = mDynamicIndexBuffer.getCurrentBuffer();
3330 
3331     ANGLE_TRY(mDynamicIndirectBuffer.allocate(contextVk, sizeof(VkDrawIndexedIndirectCommand),
3332                                               nullptr, nullptr, indexIndirectBufferOffsetOut,
3333                                               nullptr));
3334     *indexIndirectBufferOut = mDynamicIndirectBuffer.getCurrentBuffer();
3335 
3336     BufferHelper *destIndexBuffer    = mDynamicIndexBuffer.getCurrentBuffer();
3337     BufferHelper *destIndirectBuffer = mDynamicIndirectBuffer.getCurrentBuffer();
3338 
3339     // Copy relevant section of the source into destination at allocated offset.  Note that the
3340     // offset returned by allocate() above is in bytes. As is the indices offset pointer.
3341     UtilsVk::ConvertLineLoopArrayIndirectParameters params = {};
3342     params.indirectBufferOffset    = static_cast<uint32_t>(arrayIndirectBufferOffset);
3343     params.dstIndirectBufferOffset = static_cast<uint32_t>(*indexIndirectBufferOffsetOut);
3344     params.dstIndexBufferOffset    = static_cast<uint32_t>(*indexBufferOffsetOut);
3345 
3346     ANGLE_TRY(contextVk->getUtils().convertLineLoopArrayIndirectBuffer(
3347         contextVk, arrayIndirectBuffer, destIndirectBuffer, destIndexBuffer, params));
3348 
3349     return angle::Result::Continue;
3350 }
3351 
release(ContextVk * contextVk)3352 void LineLoopHelper::release(ContextVk *contextVk)
3353 {
3354     mDynamicIndexBuffer.release(contextVk->getRenderer());
3355     mDynamicIndirectBuffer.release(contextVk->getRenderer());
3356 }
3357 
destroy(RendererVk * renderer)3358 void LineLoopHelper::destroy(RendererVk *renderer)
3359 {
3360     mDynamicIndexBuffer.destroy(renderer);
3361     mDynamicIndirectBuffer.destroy(renderer);
3362 }
3363 
// static
// Records the indexed draw for a previously prepared line-loop index buffer.
void LineLoopHelper::Draw(uint32_t count, uint32_t baseVertex, CommandBuffer *commandBuffer)
{
    // Our first index is always 0 because that's how the index buffers are set up (see the
    // getIndexBufferFor* / streamIndices* helpers above).
    commandBuffer->drawIndexedBaseVertex(count, baseVertex);
}
3370 
// Maps a GL shader stage to the corresponding PipelineStage via a lookup table.
PipelineStage GetPipelineStage(gl::ShaderType stage)
{
    return kPipelineStageShaderMap[stage];
}
3375 
3376 // PipelineBarrier implementation.
addDiagnosticsString(std::ostringstream & out) const3377 void PipelineBarrier::addDiagnosticsString(std::ostringstream &out) const
3378 {
3379     if (mMemoryBarrierSrcAccess != 0 || mMemoryBarrierDstAccess != 0)
3380     {
3381         out << "Src: 0x" << std::hex << mMemoryBarrierSrcAccess << " &rarr; Dst: 0x" << std::hex
3382             << mMemoryBarrierDstAccess << std::endl;
3383     }
3384 }
3385 
// BufferHelper implementation.
// Default state: no size, no recorded accesses/stages, and an invalid serial.
BufferHelper::BufferHelper()
    : mMemoryPropertyFlags{},
      mSize(0),
      // uint32_t max appears to serve as a "no queue family assigned yet" sentinel --
      // NOTE(review): confirm against where mCurrentQueueFamilyIndex is first assigned.
      mCurrentQueueFamilyIndex(std::numeric_limits<uint32_t>::max()),
      mCurrentWriteAccess(0),
      mCurrentReadAccess(0),
      mCurrentWriteStages(0),
      mCurrentReadStages(0),
      mSerial()
{}
3397 
// BufferMemory: starts with no external client buffer and no active mapping.
BufferMemory::BufferMemory() : mClientBuffer(nullptr), mMappedMemory(nullptr) {}

BufferMemory::~BufferMemory() = default;
3401 
// Marks this memory as backed by an external (EGL client) buffer.  Only the handle is recorded
// here; presumably isExternalBuffer() keys off mClientBuffer being non-null -- its definition is
// not in this file.
angle::Result BufferMemory::initExternal(GLeglClientBufferEXT clientBuffer)
{
    ASSERT(clientBuffer != nullptr);
    mClientBuffer = clientBuffer;
    return angle::Result::Continue;
}
3408 
// Initializes for the non-external (allocator-backed) case; must not already hold a client
// buffer.
angle::Result BufferMemory::init()
{
    ASSERT(mClientBuffer == nullptr);
    return angle::Result::Continue;
}
3414 
unmap(RendererVk * renderer)3415 void BufferMemory::unmap(RendererVk *renderer)
3416 {
3417     if (mMappedMemory != nullptr)
3418     {
3419         if (isExternalBuffer())
3420         {
3421             mExternalMemory.unmap(renderer->getDevice());
3422         }
3423         else
3424         {
3425             mAllocation.unmap(renderer->getAllocator());
3426         }
3427 
3428         mMappedMemory = nullptr;
3429     }
3430 }
3431 
destroy(RendererVk * renderer)3432 void BufferMemory::destroy(RendererVk *renderer)
3433 {
3434     if (isExternalBuffer())
3435     {
3436         mExternalMemory.destroy(renderer->getDevice());
3437         ReleaseAndroidExternalMemory(renderer, mClientBuffer);
3438     }
3439     else
3440     {
3441         mAllocation.destroy(renderer->getAllocator());
3442     }
3443 }
3444 
flush(RendererVk * renderer,VkMemoryMapFlags memoryPropertyFlags,VkDeviceSize offset,VkDeviceSize size)3445 void BufferMemory::flush(RendererVk *renderer,
3446                          VkMemoryMapFlags memoryPropertyFlags,
3447                          VkDeviceSize offset,
3448                          VkDeviceSize size)
3449 {
3450     if (isExternalBuffer())
3451     {
3452         // if the memory type is not host coherent, we perform an explicit flush
3453         if ((memoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0)
3454         {
3455             VkMappedMemoryRange mappedRange = {};
3456             mappedRange.sType               = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
3457             mappedRange.memory              = mExternalMemory.getHandle();
3458             mappedRange.offset              = offset;
3459             mappedRange.size                = size;
3460             mExternalMemory.flush(renderer->getDevice(), mappedRange);
3461         }
3462     }
3463     else
3464     {
3465         mAllocation.flush(renderer->getAllocator(), offset, size);
3466     }
3467 }
3468 
invalidate(RendererVk * renderer,VkMemoryMapFlags memoryPropertyFlags,VkDeviceSize offset,VkDeviceSize size)3469 void BufferMemory::invalidate(RendererVk *renderer,
3470                               VkMemoryMapFlags memoryPropertyFlags,
3471                               VkDeviceSize offset,
3472                               VkDeviceSize size)
3473 {
3474     if (isExternalBuffer())
3475     {
3476         // if the memory type is not device coherent, we perform an explicit invalidate
3477         if ((memoryPropertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD) == 0)
3478         {
3479             VkMappedMemoryRange memoryRanges = {};
3480             memoryRanges.sType               = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
3481             memoryRanges.memory              = mExternalMemory.getHandle();
3482             memoryRanges.offset              = offset;
3483             memoryRanges.size                = size;
3484             mExternalMemory.invalidate(renderer->getDevice(), memoryRanges);
3485         }
3486     }
3487     else
3488     {
3489         mAllocation.invalidate(renderer->getAllocator(), offset, size);
3490     }
3491 }
3492 
mapImpl(ContextVk * contextVk,VkDeviceSize size)3493 angle::Result BufferMemory::mapImpl(ContextVk *contextVk, VkDeviceSize size)
3494 {
3495     if (isExternalBuffer())
3496     {
3497         ANGLE_VK_TRY(contextVk, mExternalMemory.map(contextVk->getRenderer()->getDevice(), 0, size,
3498                                                     0, &mMappedMemory));
3499     }
3500     else
3501     {
3502         ANGLE_VK_TRY(contextVk,
3503                      mAllocation.map(contextVk->getRenderer()->getAllocator(), &mMappedMemory));
3504     }
3505 
3506     return angle::Result::Continue;
3507 }
3508 
3509 BufferHelper::~BufferHelper() = default;
3510 
// Creates mBuffer and allocates backing memory for it via the VMA allocator.
// HOST_VISIBLE in |memoryPropertyFlags| is treated as a hard requirement; all
// other requested property flags are only preferred.  When the
// allocateNonZeroMemory feature is enabled, the new allocation is filled with
// a non-zero pattern (for sanitization of uninitialized-data bugs).
angle::Result BufferHelper::init(ContextVk *contextVk,
                                 const VkBufferCreateInfo &requestedCreateInfo,
                                 VkMemoryPropertyFlags memoryPropertyFlags)
{
    RendererVk *renderer = contextVk->getRenderer();

    mSerial = renderer->getResourceSerialFactory().generateBufferSerial();
    mSize   = requestedCreateInfo.size;

    VkBufferCreateInfo modifiedCreateInfo;
    const VkBufferCreateInfo *createInfo = &requestedCreateInfo;

    // Workaround feature: over-allocate by one max vertex-attribute stride —
    // presumably so vertex fetches that read slightly past the end of the
    // buffer stay within the allocation.  Note mSize keeps the unpadded size.
    if (renderer->getFeatures().padBuffersToMaxVertexAttribStride.enabled)
    {
        const VkDeviceSize maxVertexAttribStride = renderer->getMaxVertexAttribStride();
        ASSERT(maxVertexAttribStride);
        modifiedCreateInfo = requestedCreateInfo;
        modifiedCreateInfo.size += maxVertexAttribStride;
        createInfo = &modifiedCreateInfo;
    }

    // HOST_VISIBLE must be honored exactly; everything else is best-effort.
    VkMemoryPropertyFlags requiredFlags =
        (memoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
    VkMemoryPropertyFlags preferredFlags =
        (memoryPropertyFlags & (~VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));

    const Allocator &allocator = renderer->getAllocator();
    bool persistentlyMapped    = renderer->getFeatures().persistentlyMappedBuffers.enabled;

    // Check that the allocation is not too large.
    uint32_t memoryTypeIndex = 0;
    ANGLE_VK_TRY(contextVk, allocator.findMemoryTypeIndexForBufferInfo(
                                *createInfo, requiredFlags, preferredFlags, persistentlyMapped,
                                &memoryTypeIndex));

    VkDeviceSize heapSize =
        renderer->getMemoryProperties().getHeapSizeForMemoryType(memoryTypeIndex);

    ANGLE_VK_CHECK(contextVk, createInfo->size <= heapSize, VK_ERROR_OUT_OF_DEVICE_MEMORY);

    ANGLE_VK_TRY(contextVk, allocator.createBuffer(*createInfo, requiredFlags, preferredFlags,
                                                   persistentlyMapped, &memoryTypeIndex, &mBuffer,
                                                   mMemory.getMemoryObject()));
    // Record the properties the allocator actually selected (may include
    // preferred flags that were satisfied).
    allocator.getMemoryTypeProperties(memoryTypeIndex, &mMemoryPropertyFlags);
    mCurrentQueueFamilyIndex = renderer->getQueueFamilyIndex();

    if (renderer->getFeatures().allocateNonZeroMemory.enabled)
    {
        // This memory can't be mapped, so the buffer must be marked as a transfer destination so we
        // can use a staging resource to initialize it to a non-zero value. If the memory is
        // mappable we do the initialization in AllocateBufferMemory.
        if ((mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0 &&
            (requestedCreateInfo.usage & VK_BUFFER_USAGE_TRANSFER_DST_BIT) != 0)
        {
            ANGLE_TRY(initializeNonZeroMemory(contextVk, createInfo->size));
        }
        else if ((mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
        {
            // Can map the memory.
            // Pick an arbitrary value to initialize non-zero memory for sanitization.
            constexpr int kNonZeroInitValue = 55;
            ANGLE_TRY(InitMappableAllocation(contextVk, allocator, mMemory.getMemoryObject(), mSize,
                                             kNonZeroInitValue, mMemoryPropertyFlags));
        }
    }

    ANGLE_TRY(mMemory.init());

    return angle::Result::Continue;
}
3581 
// Android-only: wraps an imported client buffer (AHardwareBuffer) in a
// VkBuffer.  The buffer is created with external-memory handle type
// ANDROID_HARDWARE_BUFFER, then bound to memory imported from |clientBuffer|.
angle::Result BufferHelper::initExternal(ContextVk *contextVk,
                                         VkMemoryPropertyFlags memoryProperties,
                                         const VkBufferCreateInfo &requestedCreateInfo,
                                         GLeglClientBufferEXT clientBuffer)
{
    ASSERT(IsAndroid());

    RendererVk *renderer = contextVk->getRenderer();

    mSerial = renderer->getResourceSerialFactory().generateBufferSerial();
    mSize   = requestedCreateInfo.size;

    // Chain VkExternalMemoryBufferCreateInfo onto the caller's create info so
    // the buffer can be bound to AHardwareBuffer-backed memory.
    VkBufferCreateInfo modifiedCreateInfo             = requestedCreateInfo;
    VkExternalMemoryBufferCreateInfo externCreateInfo = {};
    externCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO;
    externCreateInfo.handleTypes =
        VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
    externCreateInfo.pNext   = nullptr;
    modifiedCreateInfo.pNext = &externCreateInfo;

    ANGLE_VK_TRY(contextVk, mBuffer.init(renderer->getDevice(), modifiedCreateInfo));

    // Imports the client buffer's memory and binds it to mBuffer; also fills
    // in the actual memory property flags of the imported memory.
    ANGLE_TRY(InitAndroidExternalMemory(contextVk, clientBuffer, memoryProperties, &mBuffer,
                                        &mMemoryPropertyFlags, mMemory.getExternalMemoryObject()));

    ANGLE_TRY(mMemory.initExternal(clientBuffer));

    mCurrentQueueFamilyIndex = renderer->getQueueFamilyIndex();

    return angle::Result::Continue;
}
3613 
// Fills non-mappable buffer memory with non-zero data by recording a one-off
// transfer from a staging buffer and submitting it.  The staging buffer is
// handed to the garbage collector keyed on the submission serial, and this
// buffer's in-use serial is updated so later waits cover the copy.
angle::Result BufferHelper::initializeNonZeroMemory(Context *context, VkDeviceSize size)
{
    // Staging buffer memory is non-zero-initialized in 'init'.
    StagingBuffer stagingBuffer;
    ANGLE_TRY(stagingBuffer.init(context, size, StagingUsage::Both));

    RendererVk *renderer = context->getRenderer();

    PrimaryCommandBuffer commandBuffer;
    ANGLE_TRY(renderer->getCommandBufferOneOff(context, false, &commandBuffer));

    // Queue a DMA copy.
    VkBufferCopy copyRegion = {};
    copyRegion.srcOffset    = 0;
    copyRegion.dstOffset    = 0;
    copyRegion.size         = size;

    commandBuffer.copyBuffer(stagingBuffer.getBuffer(), mBuffer, 1, &copyRegion);

    ANGLE_VK_TRY(context, commandBuffer.end());

    Serial serial;
    ANGLE_TRY(renderer->queueSubmitOneOff(context, std::move(commandBuffer), false,
                                          egl::ContextPriority::Medium, nullptr,
                                          vk::SubmitPolicy::AllowDeferred, &serial));

    // The staging buffer is freed once the submission retires.
    stagingBuffer.collectGarbage(renderer, serial);
    mUse.updateSerialOneOff(serial);

    return angle::Result::Continue;
}
3645 
destroy(RendererVk * renderer)3646 void BufferHelper::destroy(RendererVk *renderer)
3647 {
3648     VkDevice device = renderer->getDevice();
3649     unmap(renderer);
3650     mSize = 0;
3651 
3652     mBuffer.destroy(device);
3653     mMemory.destroy(renderer);
3654 }
3655 
// Defers destruction: the buffer and its memory objects are handed to the
// garbage collector together with mUse, so they are freed only after the last
// pending GPU use completes.
void BufferHelper::release(RendererVk *renderer)
{
    unmap(renderer);
    mSize = 0;

    renderer->collectGarbageAndReinit(&mUse, &mBuffer, mMemory.getExternalMemoryObject(),
                                      mMemory.getMemoryObject());
}
3664 
copyFromBuffer(ContextVk * contextVk,BufferHelper * srcBuffer,uint32_t regionCount,const VkBufferCopy * copyRegions)3665 angle::Result BufferHelper::copyFromBuffer(ContextVk *contextVk,
3666                                            BufferHelper *srcBuffer,
3667                                            uint32_t regionCount,
3668                                            const VkBufferCopy *copyRegions)
3669 {
3670     // Check for self-dependency.
3671     vk::CommandBufferAccess access;
3672     if (srcBuffer->getBufferSerial() == getBufferSerial())
3673     {
3674         access.onBufferSelfCopy(this);
3675     }
3676     else
3677     {
3678         access.onBufferTransferRead(srcBuffer);
3679         access.onBufferTransferWrite(this);
3680     }
3681 
3682     CommandBuffer *commandBuffer;
3683     ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(access, &commandBuffer));
3684 
3685     commandBuffer->copyBuffer(srcBuffer->getBuffer(), mBuffer, regionCount, copyRegions);
3686 
3687     return angle::Result::Continue;
3688 }
3689 
// Releases any host mapping of the buffer's memory (delegates to BufferMemory).
void BufferHelper::unmap(RendererVk *renderer)
{
    mMemory.unmap(renderer);
}
3694 
flush(RendererVk * renderer,VkDeviceSize offset,VkDeviceSize size)3695 angle::Result BufferHelper::flush(RendererVk *renderer, VkDeviceSize offset, VkDeviceSize size)
3696 {
3697     bool hostVisible  = mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3698     bool hostCoherent = mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
3699     if (hostVisible && !hostCoherent)
3700     {
3701         mMemory.flush(renderer, mMemoryPropertyFlags, offset, size);
3702     }
3703     return angle::Result::Continue;
3704 }
3705 
invalidate(RendererVk * renderer,VkDeviceSize offset,VkDeviceSize size)3706 angle::Result BufferHelper::invalidate(RendererVk *renderer, VkDeviceSize offset, VkDeviceSize size)
3707 {
3708     bool hostVisible  = mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3709     bool hostCoherent = mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
3710     if (hostVisible && !hostCoherent)
3711     {
3712         mMemory.invalidate(renderer, mMemoryPropertyFlags, offset, size);
3713     }
3714     return angle::Result::Continue;
3715 }
3716 
// Records a queue-family ownership transfer barrier for the whole buffer and
// updates the tracked owning queue family.  Access masks are zero and both
// stage masks are ALL_COMMANDS; the barrier only performs the ownership
// transfer, not memory-availability/visibility operations.
void BufferHelper::changeQueue(uint32_t newQueueFamilyIndex, CommandBuffer *commandBuffer)
{
    VkBufferMemoryBarrier bufferMemoryBarrier = {};
    bufferMemoryBarrier.sType                 = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
    bufferMemoryBarrier.srcAccessMask         = 0;
    bufferMemoryBarrier.dstAccessMask         = 0;
    bufferMemoryBarrier.srcQueueFamilyIndex   = mCurrentQueueFamilyIndex;
    bufferMemoryBarrier.dstQueueFamilyIndex   = newQueueFamilyIndex;
    bufferMemoryBarrier.buffer                = mBuffer.getHandle();
    bufferMemoryBarrier.offset                = 0;
    bufferMemoryBarrier.size                  = VK_WHOLE_SIZE;

    commandBuffer->bufferBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                                 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, &bufferMemoryBarrier);

    mCurrentQueueFamilyIndex = newQueueFamilyIndex;
}
3734 
// Acquires ownership of the buffer from an external queue family: records a
// transfer barrier from |externalQueueFamilyIndex| to
// |rendererQueueFamilyIndex|.  |contextVk| is currently unused.
void BufferHelper::acquireFromExternal(ContextVk *contextVk,
                                       uint32_t externalQueueFamilyIndex,
                                       uint32_t rendererQueueFamilyIndex,
                                       CommandBuffer *commandBuffer)
{
    // Pretend the buffer is currently owned by the external queue family so
    // changeQueue records the correct source of the transfer.
    mCurrentQueueFamilyIndex = externalQueueFamilyIndex;

    changeQueue(rendererQueueFamilyIndex, commandBuffer);
}
3744 
// Releases ownership of the buffer to an external queue family: records a
// transfer barrier from |rendererQueueFamilyIndex| (which must be the current
// owner) to |externalQueueFamilyIndex|.  |contextVk| is currently unused.
void BufferHelper::releaseToExternal(ContextVk *contextVk,
                                     uint32_t rendererQueueFamilyIndex,
                                     uint32_t externalQueueFamilyIndex,
                                     CommandBuffer *commandBuffer)
{
    ASSERT(mCurrentQueueFamilyIndex == rendererQueueFamilyIndex);

    changeQueue(externalQueueFamilyIndex, commandBuffer);
}
3754 
// Returns whether the buffer is currently owned by an external queue family.
bool BufferHelper::isReleasedToExternal() const
{
#if !defined(ANGLE_PLATFORM_MACOS) && !defined(ANGLE_PLATFORM_ANDROID)
    return IsExternalQueueFamily(mCurrentQueueFamilyIndex);
#else
    // TODO(anglebug.com/4635): Implement external memory barriers on Mac/Android.
    return false;
#endif
}
3764 
// Merges into |barrier| any barrier needed before reading this buffer with
// |readAccessType| at |readStage|, then accumulates the read into the tracked
// state.  Returns true if |barrier| was modified.
bool BufferHelper::recordReadBarrier(VkAccessFlags readAccessType,
                                     VkPipelineStageFlags readStage,
                                     PipelineBarrier *barrier)
{
    bool barrierModified = false;
    // If there was a prior write and we are making a read that is either a new access type or from
    // a new stage, we need a barrier
    if (mCurrentWriteAccess != 0 && (((mCurrentReadAccess & readAccessType) != readAccessType) ||
                                     ((mCurrentReadStages & readStage) != readStage)))
    {
        barrier->mergeMemoryBarrier(mCurrentWriteStages, readStage, mCurrentWriteAccess,
                                    readAccessType);
        barrierModified = true;
    }

    // Accumulate new read usage.
    mCurrentReadAccess |= readAccessType;
    mCurrentReadStages |= readStage;
    return barrierModified;
}
3785 
// Merges into |barrier| any barrier needed before writing this buffer with
// |writeAccessType| at |writeStage|: any prior read (WAR) or write (WAW)
// requires one.  The tracked read state is reset and the write state replaced,
// so subsequent accesses are ordered against this write.  Returns true if
// |barrier| was modified.
bool BufferHelper::recordWriteBarrier(VkAccessFlags writeAccessType,
                                      VkPipelineStageFlags writeStage,
                                      PipelineBarrier *barrier)
{
    bool barrierModified = false;
    // We don't need to check mCurrentReadStages here since if it is not zero, mCurrentReadAccess
    // must not be zero as well. stage is finer grain than accessType.
    ASSERT((!mCurrentReadStages && !mCurrentReadAccess) ||
           (mCurrentReadStages && mCurrentReadAccess));
    if (mCurrentReadAccess != 0 || mCurrentWriteAccess != 0)
    {
        barrier->mergeMemoryBarrier(mCurrentWriteStages | mCurrentReadStages, writeStage,
                                    mCurrentWriteAccess, writeAccessType);
        barrierModified = true;
    }

    // Reset usages on the new write.
    mCurrentWriteAccess = writeAccessType;
    mCurrentReadAccess  = 0;
    mCurrentWriteStages = writeStage;
    mCurrentReadStages  = 0;
    return barrierModified;
}
3809 
3810 // ImageHelper implementation.
// Default constructor: starts with all cached properties reset and all
// content marked undefined (see resetCachedProperties()).
ImageHelper::ImageHelper()
{
    resetCachedProperties();
}
3815 
// Move constructor: steals all handles and tracked state from |other|, then
// resets |other| to its default-constructed state so its destructor sees an
// invalid (empty) image.
ImageHelper::ImageHelper(ImageHelper &&other)
    : Resource(std::move(other)),
      mImage(std::move(other.mImage)),
      mDeviceMemory(std::move(other.mDeviceMemory)),
      mImageType(other.mImageType),
      mTilingMode(other.mTilingMode),
      mCreateFlags(other.mCreateFlags),
      mUsage(other.mUsage),
      mExtents(other.mExtents),
      mRotatedAspectRatio(other.mRotatedAspectRatio),
      mFormat(other.mFormat),
      mSamples(other.mSamples),
      mImageSerial(other.mImageSerial),
      mCurrentLayout(other.mCurrentLayout),
      mCurrentQueueFamilyIndex(other.mCurrentQueueFamilyIndex),
      mLastNonShaderReadOnlyLayout(other.mLastNonShaderReadOnlyLayout),
      mCurrentShaderReadStageMask(other.mCurrentShaderReadStageMask),
      mYuvConversionSampler(std::move(other.mYuvConversionSampler)),
      mExternalFormat(other.mExternalFormat),
      mFirstAllocatedLevel(other.mFirstAllocatedLevel),
      mLayerCount(other.mLayerCount),
      mLevelCount(other.mLevelCount),
      mStagingBuffer(std::move(other.mStagingBuffer)),
      mSubresourceUpdates(std::move(other.mSubresourceUpdates)),
      mCurrentSingleClearValue(std::move(other.mCurrentSingleClearValue)),
      mContentDefined(std::move(other.mContentDefined)),
      mStencilContentDefined(std::move(other.mStencilContentDefined))
{
    ASSERT(this != &other);
    other.resetCachedProperties();
}
3847 
// Destructor: the image must already have been destroyed or released;
// destruction of a still-valid image indicates a resource leak.
ImageHelper::~ImageHelper()
{
    ASSERT(!valid());
}
3852 
// Resets all cached image properties and tracked state to their
// default-constructed values, and marks the entire content undefined.  Used
// by the default constructor and on the moved-from object after a move.
void ImageHelper::resetCachedProperties()
{
    mImageType                   = VK_IMAGE_TYPE_2D;
    mTilingMode                  = VK_IMAGE_TILING_OPTIMAL;
    mCreateFlags                 = kVkImageCreateFlagsNone;
    mUsage                       = 0;
    mExtents                     = {};
    mRotatedAspectRatio          = false;
    mFormat                      = nullptr;
    mSamples                     = 1;
    mImageSerial                 = kInvalidImageSerial;
    mCurrentLayout               = ImageLayout::Undefined;
    mCurrentQueueFamilyIndex     = std::numeric_limits<uint32_t>::max();
    mLastNonShaderReadOnlyLayout = ImageLayout::Undefined;
    mCurrentShaderReadStageMask  = 0;
    mFirstAllocatedLevel         = gl::LevelIndex(0);
    mLayerCount                  = 0;
    mLevelCount                  = 0;
    mExternalFormat              = 0;
    mCurrentSingleClearValue.reset();
    mRenderPassUsageFlags.reset();

    setEntireContentUndefined();
}
3877 
setEntireContentDefined()3878 void ImageHelper::setEntireContentDefined()
3879 {
3880     for (LevelContentDefinedMask &levelContentDefined : mContentDefined)
3881     {
3882         levelContentDefined.set();
3883     }
3884     for (LevelContentDefinedMask &levelContentDefined : mStencilContentDefined)
3885     {
3886         levelContentDefined.set();
3887     }
3888 }
3889 
setEntireContentUndefined()3890 void ImageHelper::setEntireContentUndefined()
3891 {
3892     for (LevelContentDefinedMask &levelContentDefined : mContentDefined)
3893     {
3894         levelContentDefined.reset();
3895     }
3896     for (LevelContentDefinedMask &levelContentDefined : mStencilContentDefined)
3897     {
3898         levelContentDefined.reset();
3899     }
3900 }
3901 
// Marks the given level/layer range of the selected aspects as having defined
// contents.  Any non-stencil aspect bit updates the color/depth mask; the
// stencil bit updates the separate stencil mask.  Only the first
// kMaxContentDefinedLayerCount layers are tracked.
void ImageHelper::setContentDefined(LevelIndex levelStart,
                                    uint32_t levelCount,
                                    uint32_t layerStart,
                                    uint32_t layerCount,
                                    VkImageAspectFlags aspectFlags)
{
    // Mark the range as defined.  Layers above 8 are discarded, and are always assumed to have
    // defined contents.
    if (layerStart >= kMaxContentDefinedLayerCount)
    {
        return;
    }

    // One bit per layer in [layerStart, layerStart + layerCount), clamped to
    // the tracked layer count.
    uint8_t layerRangeBits =
        GetContentDefinedLayerRangeBits(layerStart, layerCount, kMaxContentDefinedLayerCount);

    for (uint32_t levelOffset = 0; levelOffset < levelCount; ++levelOffset)
    {
        LevelIndex level = levelStart + levelOffset;

        if ((aspectFlags & ~VK_IMAGE_ASPECT_STENCIL_BIT) != 0)
        {
            getLevelContentDefined(level) |= layerRangeBits;
        }
        if ((aspectFlags & VK_IMAGE_ASPECT_STENCIL_BIT) != 0)
        {
            getLevelStencilContentDefined(level) |= layerRangeBits;
        }
    }
}
3932 
// Returns the mutable per-layer content-defined mask for the color/depth
// aspect of |level|.
ImageHelper::LevelContentDefinedMask &ImageHelper::getLevelContentDefined(LevelIndex level)
{
    return mContentDefined[level.get()];
}
3937 
// Returns the mutable per-layer content-defined mask for the stencil aspect
// of |level|.
ImageHelper::LevelContentDefinedMask &ImageHelper::getLevelStencilContentDefined(LevelIndex level)
{
    return mStencilContentDefined[level.get()];
}
3942 
// Const overload: per-layer content-defined mask for the color/depth aspect.
const ImageHelper::LevelContentDefinedMask &ImageHelper::getLevelContentDefined(
    LevelIndex level) const
{
    return mContentDefined[level.get()];
}
3948 
// Const overload: per-layer content-defined mask for the stencil aspect.
const ImageHelper::LevelContentDefinedMask &ImageHelper::getLevelStencilContentDefined(
    LevelIndex level) const
{
    return mStencilContentDefined[level.get()];
}
3954 
// Initializes the dynamic staging buffer used for image data transfers, with
// the given usage flags, alignment suitable for image-copy sources, and
// initial size.  Uses the OneShotUse dynamic-buffer policy.
void ImageHelper::initStagingBuffer(RendererVk *renderer,
                                    size_t imageCopyBufferAlignment,
                                    VkBufferUsageFlags usageFlags,
                                    size_t initialSize)
{
    mStagingBuffer.init(renderer, usageFlags, imageCopyBufferAlignment, initialSize, true,
                        DynamicBufferPolicy::OneShotUse);
}
3963 
// Convenience wrapper over initExternal() for non-external images: no extra
// create flags, Undefined initial layout, no external create-info chain, and
// no interest in whether the image-format-list path was taken.
angle::Result ImageHelper::init(Context *context,
                                gl::TextureType textureType,
                                const VkExtent3D &extents,
                                const Format &format,
                                GLint samples,
                                VkImageUsageFlags usage,
                                gl::LevelIndex firstLevel,
                                uint32_t mipLevels,
                                uint32_t layerCount,
                                bool isRobustResourceInitEnabled,
                                bool hasProtectedContent)
{
    return initExternal(context, textureType, extents, format, samples, usage,
                        kVkImageCreateFlagsNone, ImageLayout::Undefined, nullptr, firstLevel,
                        mipLevels, layerCount, isRobustResourceInitEnabled, nullptr,
                        hasProtectedContent);
}
3981 
// Initializes the multisampled image backing an MSAA swapchain.  When
// |rotatedAspectRatio| is set (pre-rotated surface), the stored extents are
// swapped relative to the requested ones and the flag is remembered.
angle::Result ImageHelper::initMSAASwapchain(Context *context,
                                             gl::TextureType textureType,
                                             const VkExtent3D &extents,
                                             bool rotatedAspectRatio,
                                             const Format &format,
                                             GLint samples,
                                             VkImageUsageFlags usage,
                                             gl::LevelIndex firstLevel,
                                             uint32_t mipLevels,
                                             uint32_t layerCount,
                                             bool isRobustResourceInitEnabled,
                                             bool hasProtectedContent)
{
    ANGLE_TRY(initExternal(context, textureType, extents, format, samples, usage,
                           kVkImageCreateFlagsNone, ImageLayout::Undefined, nullptr, firstLevel,
                           mipLevels, layerCount, isRobustResourceInitEnabled, nullptr,
                           hasProtectedContent));
    if (rotatedAspectRatio)
    {
        std::swap(mExtents.width, mExtents.height);
    }
    mRotatedAspectRatio = rotatedAspectRatio;
    return angle::Result::Continue;
}
4006 
// Core image-creation path.  Builds a VkImageCreateInfo from the requested
// parameters (optionally chaining |externalImageCreateInfo|), handling:
// - image-format-list / MUTABLE_FORMAT for sRGB<->linear view reinterpretation,
// - YUV formats (mutable format bit plus a VkSamplerYcbcrConversion),
// - protected content (VK_IMAGE_CREATE_PROTECTED_BIT).
// On success the image is created in |initialLayout|; content is considered
// defined only if that layout is not Undefined.  |imageFormatListEnabledOut|
// (optional) reports whether the format-list path was used.
angle::Result ImageHelper::initExternal(Context *context,
                                        gl::TextureType textureType,
                                        const VkExtent3D &extents,
                                        const Format &format,
                                        GLint samples,
                                        VkImageUsageFlags usage,
                                        VkImageCreateFlags additionalCreateFlags,
                                        ImageLayout initialLayout,
                                        const void *externalImageCreateInfo,
                                        gl::LevelIndex firstLevel,
                                        uint32_t mipLevels,
                                        uint32_t layerCount,
                                        bool isRobustResourceInitEnabled,
                                        bool *imageFormatListEnabledOut,
                                        bool hasProtectedContent)
{
    ASSERT(!valid());
    ASSERT(!IsAnySubresourceContentDefined(mContentDefined));
    ASSERT(!IsAnySubresourceContentDefined(mStencilContentDefined));

    mImageType           = gl_vk::GetImageType(textureType);
    mExtents             = extents;
    mRotatedAspectRatio  = false;
    mFormat              = &format;
    mSamples             = std::max(samples, 1);
    mImageSerial         = context->getRenderer()->getResourceSerialFactory().generateImageSerial();
    mFirstAllocatedLevel = firstLevel;
    mLevelCount          = mipLevels;
    mLayerCount          = layerCount;
    mCreateFlags         = GetImageCreateFlags(textureType) | additionalCreateFlags;
    mUsage               = usage;

    // Validate that mLayerCount is compatible with the texture type
    ASSERT(textureType != gl::TextureType::_3D || mLayerCount == 1);
    ASSERT(textureType != gl::TextureType::_2DArray || mExtents.depth == 1);
    ASSERT(textureType != gl::TextureType::External || mLayerCount == 1);
    ASSERT(textureType != gl::TextureType::Rectangle || mLayerCount == 1);
    ASSERT(textureType != gl::TextureType::CubeMap || mLayerCount == gl::kCubeFaceCount);

    // With the introduction of sRGB related GLES extensions any sample/render target could be
    // respecified causing it to be interpreted in a different colorspace. Create the VkImage
    // accordingly.
    bool imageFormatListEnabled                        = false;
    RendererVk *rendererVk                             = context->getRenderer();
    VkImageFormatListCreateInfoKHR imageFormatListInfo = {};
    angle::FormatID imageFormat                        = format.actualImageFormatID;
    // The alternate format is the sRGB<->linear counterpart of the actual format.
    angle::FormatID additionalFormat                   = format.actualImageFormat().isSRGB
                                           ? ConvertToLinear(imageFormat)
                                           : ConvertToSRGB(imageFormat);
    constexpr uint32_t kImageListFormatCount = 2;
    VkFormat imageListFormats[kImageListFormatCount];
    imageListFormats[0] = vk::GetVkFormatFromFormatID(imageFormat);
    imageListFormats[1] = vk::GetVkFormatFromFormatID(additionalFormat);

    // Only use the format list when the driver supports it and both formats
    // share the same format feature bits.
    if (rendererVk->getFeatures().supportsImageFormatList.enabled &&
        rendererVk->haveSameFormatFeatureBits(imageFormat, additionalFormat))
    {
        imageFormatListEnabled = true;

        // Add VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT to VkImage create flag
        mCreateFlags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;

        // There is just 1 additional format we might use to create a VkImageView for this
        // VkImage
        imageFormatListInfo.sType           = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR;
        imageFormatListInfo.pNext           = externalImageCreateInfo;
        imageFormatListInfo.viewFormatCount = kImageListFormatCount;
        imageFormatListInfo.pViewFormats    = imageListFormats;
    }

    if (imageFormatListEnabledOut)
    {
        *imageFormatListEnabledOut = imageFormatListEnabled;
    }

    mYuvConversionSampler.reset();
    mExternalFormat = 0;
    if (format.actualImageFormat().isYUV)
    {
        // The Vulkan spec states: If sampler is used and the VkFormat of the image is a
        // multi-planar format, the image must have been created with
        // VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT
        mCreateFlags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;

        // The Vulkan spec states: The potential format features of the sampler YCBCR conversion
        // must support VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT or
        // VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT
        constexpr VkFormatFeatureFlags kChromaSubSampleFeatureBits =
            VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT |
            VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT;

        VkFormatFeatureFlags supportedChromaSubSampleFeatureBits =
            rendererVk->getImageFormatFeatureBits(format.actualImageFormatID,
                                                  kChromaSubSampleFeatureBits);

        // Prefer cosited-even chroma location when supported, else midpoint.
        VkChromaLocation supportedLocation = ((supportedChromaSubSampleFeatureBits &
                                               VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT) != 0)
                                                 ? VK_CHROMA_LOCATION_COSITED_EVEN
                                                 : VK_CHROMA_LOCATION_MIDPOINT;

        // Create the VkSamplerYcbcrConversion to associate with image views and samplers
        VkSamplerYcbcrConversionCreateInfo yuvConversionInfo = {};
        yuvConversionInfo.sType         = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO;
        yuvConversionInfo.format        = format.actualImageVkFormat();
        yuvConversionInfo.xChromaOffset = supportedLocation;
        yuvConversionInfo.yChromaOffset = supportedLocation;
        yuvConversionInfo.ycbcrModel    = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601;
        yuvConversionInfo.ycbcrRange    = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW;
        yuvConversionInfo.chromaFilter  = VK_FILTER_NEAREST;

        ANGLE_TRY(rendererVk->getYuvConversionCache().getYuvConversion(
            context, format.actualImageVkFormat(), false, yuvConversionInfo,
            &mYuvConversionSampler));
    }

    if (hasProtectedContent)
    {
        mCreateFlags |= VK_IMAGE_CREATE_PROTECTED_BIT;
    }

    VkImageCreateInfo imageInfo = {};
    imageInfo.sType             = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    // The format-list struct (which itself chains externalImageCreateInfo)
    // takes precedence when enabled.
    imageInfo.pNext     = (imageFormatListEnabled) ? &imageFormatListInfo : externalImageCreateInfo;
    imageInfo.flags     = mCreateFlags;
    imageInfo.imageType = mImageType;
    imageInfo.format    = format.actualImageVkFormat();
    imageInfo.extent    = mExtents;
    imageInfo.mipLevels = mLevelCount;
    imageInfo.arrayLayers           = mLayerCount;
    imageInfo.samples               = gl_vk::GetSamples(mSamples);
    imageInfo.tiling                = mTilingMode;
    imageInfo.usage                 = mUsage;
    imageInfo.sharingMode           = VK_SHARING_MODE_EXCLUSIVE;
    imageInfo.queueFamilyIndexCount = 0;
    imageInfo.pQueueFamilyIndices   = nullptr;
    imageInfo.initialLayout         = ConvertImageLayoutToVkImageLayout(initialLayout);

    mCurrentLayout = initialLayout;

    ANGLE_VK_TRY(context, mImage.init(context->getDevice(), imageInfo));

    // For emulated formats, stage a clear so the emulated channels have
    // well-defined values (also covers robust resource init).
    stageClearIfEmulatedFormat(isRobustResourceInitEnabled);

    if (initialLayout != ImageLayout::Undefined)
    {
        setEntireContentDefined();
    }

    return angle::Result::Continue;
}
4157 
releaseImage(RendererVk * renderer)4158 void ImageHelper::releaseImage(RendererVk *renderer)
4159 {
4160     renderer->collectGarbageAndReinit(&mUse, &mImage, &mDeviceMemory);
4161     mImageSerial = kInvalidImageSerial;
4162 
4163     setEntireContentUndefined();
4164 }
4165 
releaseImageFromShareContexts(RendererVk * renderer,ContextVk * contextVk)4166 void ImageHelper::releaseImageFromShareContexts(RendererVk *renderer, ContextVk *contextVk)
4167 {
4168     if (contextVk && mImageSerial.valid())
4169     {
4170         ContextVkSet &shareContextSet = *contextVk->getShareGroupVk()->getContexts();
4171         for (ContextVk *ctx : shareContextSet)
4172         {
4173             ctx->finalizeImageLayout(this);
4174         }
4175     }
4176 
4177     releaseImage(renderer);
4178 }
4179 
releaseStagingBuffer(RendererVk * renderer)4180 void ImageHelper::releaseStagingBuffer(RendererVk *renderer)
4181 {
4182     ASSERT(validateSubresourceUpdateImageRefsConsistent());
4183 
4184     // Remove updates that never made it to the texture.
4185     for (std::vector<SubresourceUpdate> &levelUpdates : mSubresourceUpdates)
4186     {
4187         for (SubresourceUpdate &update : levelUpdates)
4188         {
4189             update.release(renderer);
4190         }
4191     }
4192 
4193     ASSERT(validateSubresourceUpdateImageRefsConsistent());
4194 
4195     mStagingBuffer.release(renderer);
4196     mSubresourceUpdates.clear();
4197     mCurrentSingleClearValue.reset();
4198 }
4199 
resetImageWeakReference()4200 void ImageHelper::resetImageWeakReference()
4201 {
4202     mImage.reset();
4203     mImageSerial        = kInvalidImageSerial;
4204     mRotatedAspectRatio = false;
4205 }
4206 
// Fills a freshly allocated image with a recognizable non-zero pattern.  Used
// when the allocateNonZeroMemory feature is enabled, so that accidental reads
// of "uninitialized" memory produce obvious garbage instead of zeros.  The
// work is recorded into a one-off command buffer and submitted immediately
// (with deferred-submission policy).
angle::Result ImageHelper::initializeNonZeroMemory(Context *context,
                                                   bool hasProtectedContent,
                                                   VkDeviceSize size)
{
    const angle::Format &angleFormat = mFormat->actualImageFormat();
    bool isCompressedFormat          = angleFormat.isBlock;

    if (angleFormat.isYUV)
    {
        // VUID-vkCmdClearColorImage-image-01545
        // vkCmdClearColorImage(): format must not be one of the formats requiring sampler YCBCR
        // conversion for VK_IMAGE_ASPECT_COLOR_BIT image views
        return angle::Result::Continue;
    }

    RendererVk *renderer = context->getRenderer();

    PrimaryCommandBuffer commandBuffer;
    ANGLE_TRY(renderer->getCommandBufferOneOff(context, hasProtectedContent, &commandBuffer));

    // Queue a DMA copy.
    // Transition the whole image to TransferDst so it can be cleared/copied into.
    barrierImpl(context, getAspectFlags(), ImageLayout::TransferDst, mCurrentQueueFamilyIndex,
                &commandBuffer);

    StagingBuffer stagingBuffer;

    if (isCompressedFormat)
    {
        // If format is compressed, set its contents through buffer copies.

        // The staging buffer memory is non-zero-initialized in 'init'.
        ANGLE_TRY(stagingBuffer.init(context, size, StagingUsage::Write));

        for (LevelIndex level(0); level < LevelIndex(mLevelCount); ++level)
        {
            VkBufferImageCopy copyRegion = {};

            gl_vk::GetExtent(getLevelExtents(level), &copyRegion.imageExtent);
            copyRegion.imageSubresource.aspectMask = getAspectFlags();
            copyRegion.imageSubresource.layerCount = mLayerCount;

            // If image has depth and stencil, copy to each individually per Vulkan spec.
            bool hasBothDepthAndStencil = isCombinedDepthStencilFormat();
            if (hasBothDepthAndStencil)
            {
                copyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
            }

            commandBuffer.copyBufferToImage(stagingBuffer.getBuffer().getHandle(), mImage,
                                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copyRegion);

            if (hasBothDepthAndStencil)
            {
                copyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;

                commandBuffer.copyBufferToImage(stagingBuffer.getBuffer().getHandle(), mImage,
                                                VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
                                                &copyRegion);
            }
        }
    }
    else
    {
        // Otherwise issue clear commands.
        VkImageSubresourceRange subresource = {};
        subresource.aspectMask              = getAspectFlags();
        subresource.baseMipLevel            = 0;
        subresource.levelCount              = mLevelCount;
        subresource.baseArrayLayer          = 0;
        subresource.layerCount              = mLayerCount;

        // Arbitrary value to initialize the memory with.  Note: the given uint value, reinterpreted
        // as float is about 0.7.
        constexpr uint32_t kInitValue   = 0x3F345678;
        constexpr float kInitValueFloat = 0.12345f;

        if ((subresource.aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != 0)
        {
            VkClearColorValue clearValue;
            clearValue.uint32[0] = kInitValue;
            clearValue.uint32[1] = kInitValue;
            clearValue.uint32[2] = kInitValue;
            clearValue.uint32[3] = kInitValue;

            commandBuffer.clearColorImage(mImage, getCurrentLayout(), clearValue, 1, &subresource);
        }
        else
        {
            VkClearDepthStencilValue clearValue;
            clearValue.depth   = kInitValueFloat;
            clearValue.stencil = kInitValue;

            commandBuffer.clearDepthStencilImage(mImage, getCurrentLayout(), clearValue, 1,
                                                 &subresource);
        }
    }

    ANGLE_VK_TRY(context, commandBuffer.end());

    Serial serial;
    ANGLE_TRY(renderer->queueSubmitOneOff(context, std::move(commandBuffer), hasProtectedContent,
                                          egl::ContextPriority::Medium, nullptr,
                                          vk::SubmitPolicy::AllowDeferred, &serial));

    if (isCompressedFormat)
    {
        // The staging buffer must outlive the GPU copy; defer its destruction
        // until the submitted work (tracked by |serial|) completes.
        stagingBuffer.collectGarbage(renderer, serial);
    }
    // Record the submission serial so later users wait for this fill.
    mUse.updateSerialOneOff(serial);

    return angle::Result::Continue;
}
4319 
initMemory(Context * context,bool hasProtectedContent,const MemoryProperties & memoryProperties,VkMemoryPropertyFlags flags)4320 angle::Result ImageHelper::initMemory(Context *context,
4321                                       bool hasProtectedContent,
4322                                       const MemoryProperties &memoryProperties,
4323                                       VkMemoryPropertyFlags flags)
4324 {
4325     // TODO(jmadill): Memory sub-allocation. http://anglebug.com/2162
4326     VkDeviceSize size;
4327     if (hasProtectedContent)
4328     {
4329         flags |= VK_MEMORY_PROPERTY_PROTECTED_BIT;
4330     }
4331     ANGLE_TRY(AllocateImageMemory(context, flags, &flags, nullptr, &mImage, &mDeviceMemory, &size));
4332     mCurrentQueueFamilyIndex = context->getRenderer()->getQueueFamilyIndex();
4333 
4334     RendererVk *renderer = context->getRenderer();
4335     if (renderer->getFeatures().allocateNonZeroMemory.enabled)
4336     {
4337         // Can't map the memory. Use a staging resource.
4338         if ((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
4339         {
4340             ANGLE_TRY(initializeNonZeroMemory(context, hasProtectedContent, size));
4341         }
4342     }
4343 
4344     return angle::Result::Continue;
4345 }
4346 
// Binds externally provided memory (e.g. imported from an AHardwareBuffer or
// another process) to the image.  On Android, also creates the sampler YCbCr
// conversion object needed to sample images with an external format.
angle::Result ImageHelper::initExternalMemory(
    Context *context,
    const MemoryProperties &memoryProperties,
    const VkMemoryRequirements &memoryRequirements,
    const VkSamplerYcbcrConversionCreateInfo *samplerYcbcrConversionCreateInfo,
    const void *extraAllocationInfo,
    uint32_t currentQueueFamilyIndex,
    VkMemoryPropertyFlags flags)
{
    // TODO(jmadill): Memory sub-allocation. http://anglebug.com/2162
    ANGLE_TRY(AllocateImageMemoryWithRequirements(context, flags, memoryRequirements,
                                                  extraAllocationInfo, &mImage, &mDeviceMemory));
    // The external owner dictates which queue family currently owns the image.
    mCurrentQueueFamilyIndex = currentQueueFamilyIndex;

#ifdef VK_USE_PLATFORM_ANDROID_KHR
    if (samplerYcbcrConversionCreateInfo)
    {
        // The conversion create info is expected to chain a VkExternalFormatANDROID
        // struct describing the driver-specific external format.
        const VkExternalFormatANDROID *vkExternalFormat =
            reinterpret_cast<const VkExternalFormatANDROID *>(
                samplerYcbcrConversionCreateInfo->pNext);
        ASSERT(vkExternalFormat->sType == VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID);
        mExternalFormat = vkExternalFormat->externalFormat;

        ANGLE_TRY(context->getRenderer()->getYuvConversionCache().getYuvConversion(
            context, mExternalFormat, true, *samplerYcbcrConversionCreateInfo,
            &mYuvConversionSampler));
    }
#endif
    return angle::Result::Continue;
}
4377 
initImageView(Context * context,gl::TextureType textureType,VkImageAspectFlags aspectMask,const gl::SwizzleState & swizzleMap,ImageView * imageViewOut,LevelIndex baseMipLevelVk,uint32_t levelCount)4378 angle::Result ImageHelper::initImageView(Context *context,
4379                                          gl::TextureType textureType,
4380                                          VkImageAspectFlags aspectMask,
4381                                          const gl::SwizzleState &swizzleMap,
4382                                          ImageView *imageViewOut,
4383                                          LevelIndex baseMipLevelVk,
4384                                          uint32_t levelCount)
4385 {
4386     return initLayerImageView(context, textureType, aspectMask, swizzleMap, imageViewOut,
4387                               baseMipLevelVk, levelCount, 0, mLayerCount,
4388                               gl::SrgbWriteControlMode::Default);
4389 }
4390 
initLayerImageView(Context * context,gl::TextureType textureType,VkImageAspectFlags aspectMask,const gl::SwizzleState & swizzleMap,ImageView * imageViewOut,LevelIndex baseMipLevelVk,uint32_t levelCount,uint32_t baseArrayLayer,uint32_t layerCount,gl::SrgbWriteControlMode mode) const4391 angle::Result ImageHelper::initLayerImageView(Context *context,
4392                                               gl::TextureType textureType,
4393                                               VkImageAspectFlags aspectMask,
4394                                               const gl::SwizzleState &swizzleMap,
4395                                               ImageView *imageViewOut,
4396                                               LevelIndex baseMipLevelVk,
4397                                               uint32_t levelCount,
4398                                               uint32_t baseArrayLayer,
4399                                               uint32_t layerCount,
4400                                               gl::SrgbWriteControlMode mode) const
4401 {
4402     angle::FormatID imageFormat = mFormat->actualImageFormatID;
4403 
4404     // If we are initializing an imageview for use with EXT_srgb_write_control, we need to override
4405     // the format to its linear counterpart. Formats that cannot be reinterpreted are exempt from
4406     // this requirement.
4407     if (mode == gl::SrgbWriteControlMode::Linear)
4408     {
4409         angle::FormatID linearFormat = ConvertToLinear(imageFormat);
4410         if (linearFormat != angle::FormatID::NONE)
4411         {
4412             imageFormat = linearFormat;
4413         }
4414     }
4415 
4416     return initLayerImageViewImpl(
4417         context, textureType, aspectMask, swizzleMap, imageViewOut, baseMipLevelVk, levelCount,
4418         baseArrayLayer, layerCount,
4419         context->getRenderer()->getFormat(imageFormat).actualImageVkFormat(), nullptr);
4420 }
4421 
initLayerImageViewWithFormat(Context * context,gl::TextureType textureType,const Format & format,VkImageAspectFlags aspectMask,const gl::SwizzleState & swizzleMap,ImageView * imageViewOut,LevelIndex baseMipLevelVk,uint32_t levelCount,uint32_t baseArrayLayer,uint32_t layerCount) const4422 angle::Result ImageHelper::initLayerImageViewWithFormat(Context *context,
4423                                                         gl::TextureType textureType,
4424                                                         const Format &format,
4425                                                         VkImageAspectFlags aspectMask,
4426                                                         const gl::SwizzleState &swizzleMap,
4427                                                         ImageView *imageViewOut,
4428                                                         LevelIndex baseMipLevelVk,
4429                                                         uint32_t levelCount,
4430                                                         uint32_t baseArrayLayer,
4431                                                         uint32_t layerCount) const
4432 {
4433     return initLayerImageViewImpl(context, textureType, aspectMask, swizzleMap, imageViewOut,
4434                                   baseMipLevelVk, levelCount, baseArrayLayer, layerCount,
4435                                   format.actualImageVkFormat(), nullptr);
4436 }
4437 
// Shared implementation for all image-view creation paths.  Builds a
// VkImageViewCreateInfo for the requested subresource range, applying the
// swizzle map, an optional usage override, and (if present) the sampler
// YCbCr conversion.  The resulting view is written to |imageViewOut|.
angle::Result ImageHelper::initLayerImageViewImpl(
    Context *context,
    gl::TextureType textureType,
    VkImageAspectFlags aspectMask,
    const gl::SwizzleState &swizzleMap,
    ImageView *imageViewOut,
    LevelIndex baseMipLevelVk,
    uint32_t levelCount,
    uint32_t baseArrayLayer,
    uint32_t layerCount,
    VkFormat imageFormat,
    const VkImageViewUsageCreateInfo *imageViewUsageCreateInfo) const
{
    VkImageViewCreateInfo viewInfo = {};
    viewInfo.sType                 = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    viewInfo.flags                 = 0;
    viewInfo.image                 = mImage.getHandle();
    viewInfo.viewType              = gl_vk::GetImageViewType(textureType);
    viewInfo.format                = imageFormat;

    // YCbCr-converted views must use identity swizzles, so the application's
    // swizzle is only honored when no conversion is attached.
    if (swizzleMap.swizzleRequired() && !mYuvConversionSampler.valid())
    {
        viewInfo.components.r = gl_vk::GetSwizzle(swizzleMap.swizzleRed);
        viewInfo.components.g = gl_vk::GetSwizzle(swizzleMap.swizzleGreen);
        viewInfo.components.b = gl_vk::GetSwizzle(swizzleMap.swizzleBlue);
        viewInfo.components.a = gl_vk::GetSwizzle(swizzleMap.swizzleAlpha);
    }
    else
    {
        viewInfo.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
        viewInfo.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
        viewInfo.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
        viewInfo.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
    }
    viewInfo.subresourceRange.aspectMask     = aspectMask;
    viewInfo.subresourceRange.baseMipLevel   = baseMipLevelVk.get();
    viewInfo.subresourceRange.levelCount     = levelCount;
    viewInfo.subresourceRange.baseArrayLayer = baseArrayLayer;
    viewInfo.subresourceRange.layerCount     = layerCount;

    viewInfo.pNext = imageViewUsageCreateInfo;

    // Note: |yuvConversionInfo| must stay in scope until the init() call below,
    // since AddToPNextChain links it (by pointer) into viewInfo's pNext chain.
    VkSamplerYcbcrConversionInfo yuvConversionInfo = {};
    if (mYuvConversionSampler.valid())
    {
        ASSERT((context->getRenderer()->getFeatures().supportsYUVSamplerConversion.enabled));
        yuvConversionInfo.sType      = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO;
        yuvConversionInfo.pNext      = nullptr;
        yuvConversionInfo.conversion = mYuvConversionSampler.get().getHandle();
        AddToPNextChain(&viewInfo, &yuvConversionInfo);

        // VUID-VkImageViewCreateInfo-image-02399
        // If image has an external format, format must be VK_FORMAT_UNDEFINED
        if (mExternalFormat)
        {
            viewInfo.format = VK_FORMAT_UNDEFINED;
        }
    }
    ANGLE_VK_TRY(context, imageViewOut->init(context->getDevice(), viewInfo));
    return angle::Result::Continue;
}
4499 
initReinterpretedLayerImageView(Context * context,gl::TextureType textureType,VkImageAspectFlags aspectMask,const gl::SwizzleState & swizzleMap,ImageView * imageViewOut,LevelIndex baseMipLevelVk,uint32_t levelCount,uint32_t baseArrayLayer,uint32_t layerCount,VkImageUsageFlags imageUsageFlags,angle::FormatID imageViewFormat) const4500 angle::Result ImageHelper::initReinterpretedLayerImageView(Context *context,
4501                                                            gl::TextureType textureType,
4502                                                            VkImageAspectFlags aspectMask,
4503                                                            const gl::SwizzleState &swizzleMap,
4504                                                            ImageView *imageViewOut,
4505                                                            LevelIndex baseMipLevelVk,
4506                                                            uint32_t levelCount,
4507                                                            uint32_t baseArrayLayer,
4508                                                            uint32_t layerCount,
4509                                                            VkImageUsageFlags imageUsageFlags,
4510                                                            angle::FormatID imageViewFormat) const
4511 {
4512     VkImageViewUsageCreateInfo imageViewUsageCreateInfo = {};
4513     imageViewUsageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO;
4514     imageViewUsageCreateInfo.usage =
4515         imageUsageFlags & GetMaximalImageUsageFlags(context->getRenderer(), imageViewFormat);
4516 
4517     return initLayerImageViewImpl(context, textureType, aspectMask, swizzleMap, imageViewOut,
4518                                   baseMipLevelVk, levelCount, baseArrayLayer, layerCount,
4519                                   vk::GetVkFormatFromFormatID(imageViewFormat),
4520                                   &imageViewUsageCreateInfo);
4521 }
4522 
destroy(RendererVk * renderer)4523 void ImageHelper::destroy(RendererVk *renderer)
4524 {
4525     VkDevice device = renderer->getDevice();
4526 
4527     mImage.destroy(device);
4528     mDeviceMemory.destroy(device);
4529     mStagingBuffer.destroy(renderer);
4530     mCurrentLayout = ImageLayout::Undefined;
4531     mImageType     = VK_IMAGE_TYPE_2D;
4532     mLayerCount    = 0;
4533     mLevelCount    = 0;
4534 
4535     setEntireContentUndefined();
4536 }
4537 
init2DWeakReference(Context * context,VkImage handle,const gl::Extents & glExtents,bool rotatedAspectRatio,const Format & format,GLint samples,bool isRobustResourceInitEnabled)4538 void ImageHelper::init2DWeakReference(Context *context,
4539                                       VkImage handle,
4540                                       const gl::Extents &glExtents,
4541                                       bool rotatedAspectRatio,
4542                                       const Format &format,
4543                                       GLint samples,
4544                                       bool isRobustResourceInitEnabled)
4545 {
4546     ASSERT(!valid());
4547     ASSERT(!IsAnySubresourceContentDefined(mContentDefined));
4548     ASSERT(!IsAnySubresourceContentDefined(mStencilContentDefined));
4549 
4550     gl_vk::GetExtent(glExtents, &mExtents);
4551     mRotatedAspectRatio = rotatedAspectRatio;
4552     mFormat             = &format;
4553     mSamples            = std::max(samples, 1);
4554     mImageSerial        = context->getRenderer()->getResourceSerialFactory().generateImageSerial();
4555     mCurrentLayout      = ImageLayout::Undefined;
4556     mLayerCount         = 1;
4557     mLevelCount         = 1;
4558 
4559     mImage.setHandle(handle);
4560 
4561     stageClearIfEmulatedFormat(isRobustResourceInitEnabled);
4562 }
4563 
init2DStaging(Context * context,bool hasProtectedContent,const MemoryProperties & memoryProperties,const gl::Extents & glExtents,const Format & format,VkImageUsageFlags usage,uint32_t layerCount)4564 angle::Result ImageHelper::init2DStaging(Context *context,
4565                                          bool hasProtectedContent,
4566                                          const MemoryProperties &memoryProperties,
4567                                          const gl::Extents &glExtents,
4568                                          const Format &format,
4569                                          VkImageUsageFlags usage,
4570                                          uint32_t layerCount)
4571 {
4572     gl_vk::GetExtent(glExtents, &mExtents);
4573 
4574     return initStaging(context, hasProtectedContent, memoryProperties, VK_IMAGE_TYPE_2D, mExtents,
4575                        format, 1, usage, 1, layerCount);
4576 }
4577 
// Initializes a GPU-local staging image: OPTIMAL tiling, Undefined initial
// layout, backed by freshly allocated device-local memory.  Used as a
// temporary target/source for copies and conversions.
angle::Result ImageHelper::initStaging(Context *context,
                                       bool hasProtectedContent,
                                       const MemoryProperties &memoryProperties,
                                       VkImageType imageType,
                                       const VkExtent3D &extents,
                                       const Format &format,
                                       GLint samples,
                                       VkImageUsageFlags usage,
                                       uint32_t mipLevels,
                                       uint32_t layerCount)
{
    ASSERT(!valid());
    ASSERT(!IsAnySubresourceContentDefined(mContentDefined));
    ASSERT(!IsAnySubresourceContentDefined(mStencilContentDefined));

    mImageType          = imageType;
    mExtents            = extents;
    mRotatedAspectRatio = false;
    mFormat             = &format;
    // A sample count of 0 means single-sampled; Vulkan requires at least 1.
    mSamples            = std::max(samples, 1);
    mImageSerial        = context->getRenderer()->getResourceSerialFactory().generateImageSerial();
    mLayerCount         = layerCount;
    mLevelCount         = mipLevels;
    mUsage              = usage;

    // Validate that mLayerCount is compatible with the image type
    ASSERT(imageType != VK_IMAGE_TYPE_3D || mLayerCount == 1);
    ASSERT(imageType != VK_IMAGE_TYPE_2D || mExtents.depth == 1);

    mCurrentLayout = ImageLayout::Undefined;

    VkImageCreateInfo imageInfo     = {};
    imageInfo.sType                 = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    imageInfo.flags                 = hasProtectedContent ? VK_IMAGE_CREATE_PROTECTED_BIT : 0;
    imageInfo.imageType             = mImageType;
    imageInfo.format                = format.actualImageVkFormat();
    imageInfo.extent                = mExtents;
    imageInfo.mipLevels             = mLevelCount;
    imageInfo.arrayLayers           = mLayerCount;
    imageInfo.samples               = gl_vk::GetSamples(mSamples);
    imageInfo.tiling                = VK_IMAGE_TILING_OPTIMAL;
    imageInfo.usage                 = usage;
    imageInfo.sharingMode           = VK_SHARING_MODE_EXCLUSIVE;
    imageInfo.queueFamilyIndexCount = 0;
    imageInfo.pQueueFamilyIndices   = nullptr;
    imageInfo.initialLayout         = getCurrentLayout();

    ANGLE_VK_TRY(context, mImage.init(context->getDevice(), imageInfo));

    // Allocate and bind device-local memory.
    VkMemoryPropertyFlags memoryPropertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    if (hasProtectedContent)
    {
        memoryPropertyFlags |= VK_MEMORY_PROPERTY_PROTECTED_BIT;
    }
    ANGLE_TRY(initMemory(context, hasProtectedContent, memoryProperties, memoryPropertyFlags));

    return angle::Result::Continue;
}
4637 
// Creates the implicit multisampled image used to emulate
// GL_EXT_multisampled_render_to_texture over a single-sampled |resolveImage|.
// The image is transient where possible and its memory lazily allocated,
// since its contents are discarded at the end of the render pass.
angle::Result ImageHelper::initImplicitMultisampledRenderToTexture(
    Context *context,
    bool hasProtectedContent,
    const MemoryProperties &memoryProperties,
    gl::TextureType textureType,
    GLint samples,
    const ImageHelper &resolveImage,
    bool isRobustResourceInitEnabled)
{
    ASSERT(!valid());
    ASSERT(samples > 1);
    ASSERT(!IsAnySubresourceContentDefined(mContentDefined));
    ASSERT(!IsAnySubresourceContentDefined(mStencilContentDefined));

    // The image is used as either color or depth/stencil attachment.  Additionally, its memory is
    // lazily allocated as the contents are discarded at the end of the renderpass and with tiling
    // GPUs no actual backing memory is required.
    //
    // Note that the Vulkan image is created with or without VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT
    // based on whether the memory that will be used to create the image would have
    // VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT.  TRANSIENT is provided if there is any memory that
    // supports LAZILY_ALLOCATED.  However, based on actual image requirements, such a memory may
    // not be suitable for the image.  We don't support such a case, which will result in the
    // |initMemory| call below failing.
    const bool hasLazilyAllocatedMemory = memoryProperties.hasLazilyAllocatedMemory();

    const VkImageUsageFlags kMultisampledUsageFlags =
        (hasLazilyAllocatedMemory ? VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT : 0) |
        (resolveImage.getAspectFlags() == VK_IMAGE_ASPECT_COLOR_BIT
             ? VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
             : VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
    const VkImageCreateFlags kMultisampledCreateFlags =
        hasProtectedContent ? VK_IMAGE_CREATE_PROTECTED_BIT : 0;

    // Mirror the resolve image's dimensions, format, level and layer counts.
    ANGLE_TRY(initExternal(
        context, textureType, resolveImage.getExtents(), resolveImage.getFormat(), samples,
        kMultisampledUsageFlags, kMultisampledCreateFlags, ImageLayout::Undefined, nullptr,
        resolveImage.getFirstAllocatedLevel(), resolveImage.getLevelCount(),
        resolveImage.getLayerCount(), isRobustResourceInitEnabled, nullptr, hasProtectedContent));

    const VkMemoryPropertyFlags kMultisampledMemoryFlags =
        VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
        (hasLazilyAllocatedMemory ? VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT : 0) |
        (hasProtectedContent ? VK_MEMORY_PROPERTY_PROTECTED_BIT : 0);

    // If this ever fails, this code should be modified to retry creating the image without the
    // TRANSIENT flag.
    ANGLE_TRY(initMemory(context, hasProtectedContent, memoryProperties, kMultisampledMemoryFlags));

    // Remove the emulated format clear from the multisampled image if any.  There is one already
    // staged on the resolve image if needed.
    removeStagedUpdates(context, getFirstAllocatedLevel(), getLastAllocatedLevel());

    return angle::Result::Continue;
}
4693 
getAspectFlags() const4694 VkImageAspectFlags ImageHelper::getAspectFlags() const
4695 {
4696     return GetFormatAspectFlags(mFormat->actualImageFormat());
4697 }
4698 
isCombinedDepthStencilFormat() const4699 bool ImageHelper::isCombinedDepthStencilFormat() const
4700 {
4701     return (getAspectFlags() & kDepthStencilAspects) == kDepthStencilAspects;
4702 }
4703 
// Returns the image's current layout translated to the Vulkan enum.
VkImageLayout ImageHelper::getCurrentLayout() const
{
    return ConvertImageLayoutToVkImageLayout(mCurrentLayout);
}
4708 
getLevelExtents(LevelIndex levelVk) const4709 gl::Extents ImageHelper::getLevelExtents(LevelIndex levelVk) const
4710 {
4711     // Level 0 should be the size of the extents, after that every time you increase a level
4712     // you shrink the extents by half.
4713     uint32_t width  = std::max(mExtents.width >> levelVk.get(), 1u);
4714     uint32_t height = std::max(mExtents.height >> levelVk.get(), 1u);
4715     uint32_t depth  = std::max(mExtents.depth >> levelVk.get(), 1u);
4716 
4717     return gl::Extents(width, height, depth);
4718 }
4719 
getLevelExtents2D(LevelIndex levelVk) const4720 gl::Extents ImageHelper::getLevelExtents2D(LevelIndex levelVk) const
4721 {
4722     gl::Extents extents = getLevelExtents(levelVk);
4723     extents.depth       = 1;
4724     return extents;
4725 }
4726 
getRotatedExtents() const4727 const VkExtent3D ImageHelper::getRotatedExtents() const
4728 {
4729     VkExtent3D extents = mExtents;
4730     if (mRotatedAspectRatio)
4731     {
4732         std::swap(extents.width, extents.height);
4733     }
4734     return extents;
4735 }
4736 
getRotatedLevelExtents2D(LevelIndex levelVk) const4737 gl::Extents ImageHelper::getRotatedLevelExtents2D(LevelIndex levelVk) const
4738 {
4739     gl::Extents extents = getLevelExtents2D(levelVk);
4740     if (mRotatedAspectRatio)
4741     {
4742         std::swap(extents.width, extents.height);
4743     }
4744     return extents;
4745 }
4746 
isDepthOrStencil() const4747 bool ImageHelper::isDepthOrStencil() const
4748 {
4749     return mFormat->actualImageFormat().hasDepthOrStencilBits();
4750 }
4751 
// Marks the image with the given render pass usage (e.g. attachment or sampler).
void ImageHelper::setRenderPassUsageFlag(RenderPassUsage flag)
{
    mRenderPassUsageFlags.set(flag);
}
4756 
// Clears a single render pass usage flag from the image.
void ImageHelper::clearRenderPassUsageFlag(RenderPassUsage flag)
{
    mRenderPassUsageFlags.reset(flag);
}
4761 
// Clears all render pass usage flags from the image.
void ImageHelper::resetRenderPassUsageFlags()
{
    mRenderPassUsageFlags.reset();
}
4766 
// Returns whether the given render pass usage flag is currently set on the image.
bool ImageHelper::hasRenderPassUsageFlag(RenderPassUsage flag) const
{
    return mRenderPassUsageFlags.test(flag);
}
4771 
usedByCurrentRenderPassAsAttachmentAndSampler() const4772 bool ImageHelper::usedByCurrentRenderPassAsAttachmentAndSampler() const
4773 {
4774     return mRenderPassUsageFlags[RenderPassUsage::RenderTargetAttachment] &&
4775            mRenderPassUsageFlags[RenderPassUsage::TextureSampler];
4776 }
4777 
// Returns whether a barrier is needed before reading the image in |newLayout|.
bool ImageHelper::isReadBarrierNecessary(ImageLayout newLayout) const
{
    // If transitioning to a different layout, we always need a barrier.
    if (mCurrentLayout != newLayout)
    {
        return true;
    }

    // RAR (read-after-read) is not a hazard and doesn't require a barrier.
    //
    // RAW (read-after-write) hazards always require a memory barrier.  This can only happen if the
    // layout (same as new layout) is writable which in turn is only possible if the image is
    // simultaneously bound for shader write (i.e. the layout is GENERAL).
    const ImageMemoryBarrierData &layoutData = kImageMemoryBarrierData[mCurrentLayout];
    return layoutData.type == ResourceAccess::Write;
}
4794 
// Transitions the image to |newLayout| and transfers ownership to |newQueueFamilyIndex| with a
// single barrier.  Must only be called when a queue family change is actually needed.
void ImageHelper::changeLayoutAndQueue(Context *context,
                                       VkImageAspectFlags aspectMask,
                                       ImageLayout newLayout,
                                       uint32_t newQueueFamilyIndex,
                                       CommandBuffer *commandBuffer)
{
    ASSERT(isQueueChangeNeccesary(newQueueFamilyIndex));
    barrierImpl(context, aspectMask, newLayout, newQueueFamilyIndex, commandBuffer);
}
4804 
// Acquires ownership of the image from an external queue family, transitioning it to the
// renderer's queue family while preserving |currentLayout|.
void ImageHelper::acquireFromExternal(ContextVk *contextVk,
                                      uint32_t externalQueueFamilyIndex,
                                      uint32_t rendererQueueFamilyIndex,
                                      ImageLayout currentLayout,
                                      CommandBuffer *commandBuffer)
{
    // The image must be newly allocated or have been released to the external
    // queue. If this is not the case, it's an application bug, so ASSERT might
    // eventually need to change to a warning.
    ASSERT(mCurrentLayout == ImageLayout::Undefined ||
           mCurrentQueueFamilyIndex == externalQueueFamilyIndex);

    // Record the externally-provided state first, so the acquire barrier below uses it as the
    // "from" state.
    mCurrentLayout           = currentLayout;
    mCurrentQueueFamilyIndex = externalQueueFamilyIndex;

    changeLayoutAndQueue(contextVk, getAspectFlags(), mCurrentLayout, rendererQueueFamilyIndex,
                         commandBuffer);

    // It is unknown how the external has modified the image, so assume every subresource has
    // defined content.  That is unless the layout is Undefined.
    if (currentLayout == ImageLayout::Undefined)
    {
        setEntireContentUndefined();
    }
    else
    {
        setEntireContentDefined();
    }
}
4834 
// Releases ownership of the image from the renderer's queue family back to an external queue
// family, transitioning to |desiredLayout| as part of the release barrier.
void ImageHelper::releaseToExternal(ContextVk *contextVk,
                                    uint32_t rendererQueueFamilyIndex,
                                    uint32_t externalQueueFamilyIndex,
                                    ImageLayout desiredLayout,
                                    CommandBuffer *commandBuffer)
{
    // The image must currently be owned by the renderer's queue family.
    ASSERT(mCurrentQueueFamilyIndex == rendererQueueFamilyIndex);

    changeLayoutAndQueue(contextVk, getAspectFlags(), desiredLayout, externalQueueFamilyIndex,
                         commandBuffer);
}
4846 
// Returns whether the image is currently owned by an external queue family.
bool ImageHelper::isReleasedToExternal() const
{
#if !defined(ANGLE_PLATFORM_MACOS) && !defined(ANGLE_PLATFORM_ANDROID)
    return IsExternalQueueFamily(mCurrentQueueFamilyIndex);
#else
    // TODO(anglebug.com/4635): Implement external memory barriers on Mac/Android.
    return false;
#endif
}
4856 
// Sets the GL level that maps to Vulkan level 0.  Must be called before the image is created
// (hence the !valid() assert).
void ImageHelper::setFirstAllocatedLevel(gl::LevelIndex firstLevel)
{
    ASSERT(!valid());
    mFirstAllocatedLevel = firstLevel;
}
4862 
// Converts a GL mip level index to this image's Vulkan level index, offset by the first
// allocated level.
LevelIndex ImageHelper::toVkLevel(gl::LevelIndex levelIndexGL) const
{
    return gl_vk::GetLevelIndex(levelIndexGL, mFirstAllocatedLevel);
}
4867 
// Converts this image's Vulkan level index back to the corresponding GL mip level index.
gl::LevelIndex ImageHelper::toGLLevel(LevelIndex levelIndexVk) const
{
    return vk_gl::GetLevelIndex(levelIndexVk, mFirstAllocatedLevel);
}
4872 
// Fills in |imageMemoryBarrier| for a transition from the current layout/queue family to
// |newLayout|/|newQueueFamilyIndex|.  The barrier covers the entire image (all levels and
// layers) for the given aspects.
ANGLE_INLINE void ImageHelper::initImageMemoryBarrierStruct(
    VkImageAspectFlags aspectMask,
    ImageLayout newLayout,
    uint32_t newQueueFamilyIndex,
    VkImageMemoryBarrier *imageMemoryBarrier) const
{
    // Source/destination access masks and Vulkan layouts come from the per-layout barrier table.
    const ImageMemoryBarrierData &transitionFrom = kImageMemoryBarrierData[mCurrentLayout];
    const ImageMemoryBarrierData &transitionTo   = kImageMemoryBarrierData[newLayout];

    imageMemoryBarrier->sType               = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    imageMemoryBarrier->srcAccessMask       = transitionFrom.srcAccessMask;
    imageMemoryBarrier->dstAccessMask       = transitionTo.dstAccessMask;
    imageMemoryBarrier->oldLayout           = transitionFrom.layout;
    imageMemoryBarrier->newLayout           = transitionTo.layout;
    imageMemoryBarrier->srcQueueFamilyIndex = mCurrentQueueFamilyIndex;
    imageMemoryBarrier->dstQueueFamilyIndex = newQueueFamilyIndex;
    imageMemoryBarrier->image               = mImage.getHandle();

    // Transition the whole resource.
    imageMemoryBarrier->subresourceRange.aspectMask     = aspectMask;
    imageMemoryBarrier->subresourceRange.baseMipLevel   = 0;
    imageMemoryBarrier->subresourceRange.levelCount     = mLevelCount;
    imageMemoryBarrier->subresourceRange.baseArrayLayer = 0;
    imageMemoryBarrier->subresourceRange.layerCount     = mLayerCount;
}
4898 
// Generalized to accept both "primary" and "secondary" command buffers.
//
// Records an image memory barrier transitioning the whole image to |newLayout| and
// |newQueueFamilyIndex|, then updates the tracked layout/queue-family state.
template <typename CommandBufferT>
void ImageHelper::barrierImpl(Context *context,
                              VkImageAspectFlags aspectMask,
                              ImageLayout newLayout,
                              uint32_t newQueueFamilyIndex,
                              CommandBufferT *commandBuffer)
{
    const ImageMemoryBarrierData &transitionFrom = kImageMemoryBarrierData[mCurrentLayout];
    const ImageMemoryBarrierData &transitionTo   = kImageMemoryBarrierData[newLayout];

    VkImageMemoryBarrier imageMemoryBarrier = {};
    initImageMemoryBarrierStruct(aspectMask, newLayout, newQueueFamilyIndex, &imageMemoryBarrier);

    // There might be other shaderRead operations there other than the current layout.
    VkPipelineStageFlags srcStageMask = GetImageLayoutSrcStageMask(context, transitionFrom);
    if (mCurrentShaderReadStageMask)
    {
        // Wait for every accumulated shader-read stage, and reset the accumulation since this
        // barrier covers them all.
        srcStageMask |= mCurrentShaderReadStageMask;
        mCurrentShaderReadStageMask  = 0;
        mLastNonShaderReadOnlyLayout = ImageLayout::Undefined;
    }
    commandBuffer->imageBarrier(srcStageMask, GetImageLayoutDstStageMask(context, transitionTo),
                                imageMemoryBarrier);

    mCurrentLayout           = newLayout;
    mCurrentQueueFamilyIndex = newQueueFamilyIndex;
}
4927 
// Explicit instantiation of barrierImpl for secondary command buffers.
template void ImageHelper::barrierImpl<priv::SecondaryCommandBuffer>(
    Context *context,
    VkImageAspectFlags aspectMask,
    ImageLayout newLayout,
    uint32_t newQueueFamilyIndex,
    priv::SecondaryCommandBuffer *commandBuffer);
4934 
// Updates the tracked layout to |newLayout|, merging any required synchronization into
// |barrier|.  Returns true if |barrier| was modified.  Unlike barrierImpl, this merges into a
// PipelineBarrier instead of recording into a command buffer directly, and it elides barriers
// between consecutive shader-read-only layouts.
bool ImageHelper::updateLayoutAndBarrier(Context *context,
                                         VkImageAspectFlags aspectMask,
                                         ImageLayout newLayout,
                                         PipelineBarrier *barrier)
{
    bool barrierModified = false;
    if (newLayout == mCurrentLayout)
    {
        const ImageMemoryBarrierData &layoutData = kImageMemoryBarrierData[mCurrentLayout];
        // RAR is not a hazard and doesn't require a barrier, especially as the image layout hasn't
        // changed.  The following asserts that such a barrier is not attempted.
        ASSERT(layoutData.type == ResourceAccess::Write);
        // No layout change, only memory barrier is required
        barrier->mergeMemoryBarrier(GetImageLayoutSrcStageMask(context, layoutData),
                                    GetImageLayoutDstStageMask(context, layoutData),
                                    layoutData.srcAccessMask, layoutData.dstAccessMask);
        barrierModified = true;
    }
    else
    {
        const ImageMemoryBarrierData &transitionFrom = kImageMemoryBarrierData[mCurrentLayout];
        const ImageMemoryBarrierData &transitionTo   = kImageMemoryBarrierData[newLayout];
        VkPipelineStageFlags srcStageMask = GetImageLayoutSrcStageMask(context, transitionFrom);
        VkPipelineStageFlags dstStageMask = GetImageLayoutDstStageMask(context, transitionTo);

        if (IsShaderReadOnlyLayout(transitionTo) && IsShaderReadOnlyLayout(transitionFrom))
        {
            // If we are switching between different shader stage reads, then there is no actual
            // layout change or access type change. We only need a barrier if we are making a read
            // that is from a new stage. Also note that we barrier against previous non-shaderRead
            // layout. We do not barrier between one shaderRead and another shaderRead.
            bool isNewReadStage = (mCurrentShaderReadStageMask & dstStageMask) != dstStageMask;
            if (isNewReadStage)
            {
                const ImageMemoryBarrierData &layoutData =
                    kImageMemoryBarrierData[mLastNonShaderReadOnlyLayout];
                barrier->mergeMemoryBarrier(GetImageLayoutSrcStageMask(context, layoutData),
                                            dstStageMask, layoutData.srcAccessMask,
                                            transitionTo.dstAccessMask);
                barrierModified = true;
                // Accumulate new read stage.
                mCurrentShaderReadStageMask |= dstStageMask;
            }
        }
        else
        {
            VkImageMemoryBarrier imageMemoryBarrier = {};
            initImageMemoryBarrierStruct(aspectMask, newLayout, mCurrentQueueFamilyIndex,
                                         &imageMemoryBarrier);
            // if we transition from shaderReadOnly, we must add in stashed shader stage masks since
            // there might be outstanding shader reads from stages other than current layout. We do
            // not insert barrier between one shaderRead to another shaderRead
            if (mCurrentShaderReadStageMask)
            {
                srcStageMask |= mCurrentShaderReadStageMask;
                mCurrentShaderReadStageMask  = 0;
                mLastNonShaderReadOnlyLayout = ImageLayout::Undefined;
            }
            barrier->mergeImageBarrier(srcStageMask, dstStageMask, imageMemoryBarrier);
            barrierModified = true;

            // If we are transition into shaderRead layout, remember the last
            // non-shaderRead layout here.
            if (IsShaderReadOnlyLayout(transitionTo))
            {
                ASSERT(!IsShaderReadOnlyLayout(transitionFrom));
                mLastNonShaderReadOnlyLayout = mCurrentLayout;
                mCurrentShaderReadStageMask  = dstStageMask;
            }
        }
        mCurrentLayout = newLayout;
    }
    return barrierModified;
}
5009 
clearColor(const VkClearColorValue & color,LevelIndex baseMipLevelVk,uint32_t levelCount,uint32_t baseArrayLayer,uint32_t layerCount,CommandBuffer * commandBuffer)5010 void ImageHelper::clearColor(const VkClearColorValue &color,
5011                              LevelIndex baseMipLevelVk,
5012                              uint32_t levelCount,
5013                              uint32_t baseArrayLayer,
5014                              uint32_t layerCount,
5015                              CommandBuffer *commandBuffer)
5016 {
5017     ASSERT(valid());
5018 
5019     ASSERT(mCurrentLayout == ImageLayout::TransferDst);
5020 
5021     VkImageSubresourceRange range = {};
5022     range.aspectMask              = VK_IMAGE_ASPECT_COLOR_BIT;
5023     range.baseMipLevel            = baseMipLevelVk.get();
5024     range.levelCount              = levelCount;
5025     range.baseArrayLayer          = baseArrayLayer;
5026     range.layerCount              = layerCount;
5027 
5028     if (mImageType == VK_IMAGE_TYPE_3D)
5029     {
5030         ASSERT(baseArrayLayer == 0);
5031         ASSERT(layerCount == 1 ||
5032                layerCount == static_cast<uint32_t>(getLevelExtents(baseMipLevelVk).depth));
5033         range.layerCount = 1;
5034     }
5035 
5036     commandBuffer->clearColorImage(mImage, getCurrentLayout(), color, 1, &range);
5037 }
5038 
clearDepthStencil(VkImageAspectFlags clearAspectFlags,const VkClearDepthStencilValue & depthStencil,LevelIndex baseMipLevelVk,uint32_t levelCount,uint32_t baseArrayLayer,uint32_t layerCount,CommandBuffer * commandBuffer)5039 void ImageHelper::clearDepthStencil(VkImageAspectFlags clearAspectFlags,
5040                                     const VkClearDepthStencilValue &depthStencil,
5041                                     LevelIndex baseMipLevelVk,
5042                                     uint32_t levelCount,
5043                                     uint32_t baseArrayLayer,
5044                                     uint32_t layerCount,
5045                                     CommandBuffer *commandBuffer)
5046 {
5047     ASSERT(valid());
5048 
5049     ASSERT(mCurrentLayout == ImageLayout::TransferDst);
5050 
5051     VkImageSubresourceRange range = {};
5052     range.aspectMask              = clearAspectFlags;
5053     range.baseMipLevel            = baseMipLevelVk.get();
5054     range.levelCount              = levelCount;
5055     range.baseArrayLayer          = baseArrayLayer;
5056     range.layerCount              = layerCount;
5057 
5058     if (mImageType == VK_IMAGE_TYPE_3D)
5059     {
5060         ASSERT(baseArrayLayer == 0);
5061         ASSERT(layerCount == 1 ||
5062                layerCount == static_cast<uint32_t>(getLevelExtents(baseMipLevelVk).depth));
5063         range.layerCount = 1;
5064     }
5065 
5066     commandBuffer->clearDepthStencilImage(mImage, getCurrentLayout(), depthStencil, 1, &range);
5067 }
5068 
clear(VkImageAspectFlags aspectFlags,const VkClearValue & value,LevelIndex mipLevel,uint32_t baseArrayLayer,uint32_t layerCount,CommandBuffer * commandBuffer)5069 void ImageHelper::clear(VkImageAspectFlags aspectFlags,
5070                         const VkClearValue &value,
5071                         LevelIndex mipLevel,
5072                         uint32_t baseArrayLayer,
5073                         uint32_t layerCount,
5074                         CommandBuffer *commandBuffer)
5075 {
5076     const angle::Format &angleFormat = mFormat->actualImageFormat();
5077     bool isDepthStencil              = angleFormat.depthBits > 0 || angleFormat.stencilBits > 0;
5078 
5079     if (isDepthStencil)
5080     {
5081         clearDepthStencil(aspectFlags, value.depthStencil, mipLevel, 1, baseArrayLayer, layerCount,
5082                           commandBuffer);
5083     }
5084     else
5085     {
5086         ASSERT(!angleFormat.isBlock);
5087 
5088         clearColor(value.color, mipLevel, 1, baseArrayLayer, layerCount, commandBuffer);
5089     }
5090 }
5091 
5092 // static
Copy(ImageHelper * srcImage,ImageHelper * dstImage,const gl::Offset & srcOffset,const gl::Offset & dstOffset,const gl::Extents & copySize,const VkImageSubresourceLayers & srcSubresource,const VkImageSubresourceLayers & dstSubresource,CommandBuffer * commandBuffer)5093 void ImageHelper::Copy(ImageHelper *srcImage,
5094                        ImageHelper *dstImage,
5095                        const gl::Offset &srcOffset,
5096                        const gl::Offset &dstOffset,
5097                        const gl::Extents &copySize,
5098                        const VkImageSubresourceLayers &srcSubresource,
5099                        const VkImageSubresourceLayers &dstSubresource,
5100                        CommandBuffer *commandBuffer)
5101 {
5102     ASSERT(commandBuffer->valid() && srcImage->valid() && dstImage->valid());
5103 
5104     ASSERT(srcImage->getCurrentLayout() == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
5105     ASSERT(dstImage->getCurrentLayout() == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
5106 
5107     VkImageCopy region    = {};
5108     region.srcSubresource = srcSubresource;
5109     region.srcOffset.x    = srcOffset.x;
5110     region.srcOffset.y    = srcOffset.y;
5111     region.srcOffset.z    = srcOffset.z;
5112     region.dstSubresource = dstSubresource;
5113     region.dstOffset.x    = dstOffset.x;
5114     region.dstOffset.y    = dstOffset.y;
5115     region.dstOffset.z    = dstOffset.z;
5116     region.extent.width   = copySize.width;
5117     region.extent.height  = copySize.height;
5118     region.extent.depth   = copySize.depth;
5119 
5120     commandBuffer->copyImage(srcImage->getImage(), srcImage->getCurrentLayout(),
5121                              dstImage->getImage(), dstImage->getCurrentLayout(), 1, &region);
5122 }
5123 
// static
// Implements glCopyImageSubData between two color images.  Uses vkCmdCopyImage when the formats
// are transfer-compatible, otherwise falls back to a compute-shader copy; emulated compressed
// formats are unimplemented.
angle::Result ImageHelper::CopyImageSubData(const gl::Context *context,
                                            ImageHelper *srcImage,
                                            GLint srcLevel,
                                            GLint srcX,
                                            GLint srcY,
                                            GLint srcZ,
                                            ImageHelper *dstImage,
                                            GLint dstLevel,
                                            GLint dstX,
                                            GLint dstY,
                                            GLint dstZ,
                                            GLsizei srcWidth,
                                            GLsizei srcHeight,
                                            GLsizei srcDepth)
{
    ContextVk *contextVk = GetImpl(context);

    const Format &sourceVkFormat = srcImage->getFormat();
    VkImageTiling srcTilingMode  = srcImage->getTilingMode();
    const Format &destVkFormat   = dstImage->getFormat();
    VkImageTiling destTilingMode = dstImage->getTilingMode();

    const gl::LevelIndex srcLevelGL = gl::LevelIndex(srcLevel);
    const gl::LevelIndex dstLevelGL = gl::LevelIndex(dstLevel);

    if (CanCopyWithTransferForCopyImage(contextVk->getRenderer(), sourceVkFormat, srcTilingMode,
                                        destVkFormat, destTilingMode))
    {
        // For 3D images the Z coordinate is an offset rather than a layer index.
        bool isSrc3D = srcImage->getType() == VK_IMAGE_TYPE_3D;
        bool isDst3D = dstImage->getType() == VK_IMAGE_TYPE_3D;

        srcImage->retain(&contextVk->getResourceUseList());
        dstImage->retain(&contextVk->getResourceUseList());

        VkImageCopy region = {};

        region.srcSubresource.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
        region.srcSubresource.mipLevel       = srcImage->toVkLevel(srcLevelGL).get();
        region.srcSubresource.baseArrayLayer = isSrc3D ? 0 : srcZ;
        region.srcSubresource.layerCount     = isSrc3D ? 1 : srcDepth;

        region.dstSubresource.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
        region.dstSubresource.mipLevel       = dstImage->toVkLevel(dstLevelGL).get();
        region.dstSubresource.baseArrayLayer = isDst3D ? 0 : dstZ;
        region.dstSubresource.layerCount     = isDst3D ? 1 : srcDepth;

        region.srcOffset.x   = srcX;
        region.srcOffset.y   = srcY;
        region.srcOffset.z   = isSrc3D ? srcZ : 0;
        region.dstOffset.x   = dstX;
        region.dstOffset.y   = dstY;
        region.dstOffset.z   = isDst3D ? dstZ : 0;
        region.extent.width  = srcWidth;
        region.extent.height = srcHeight;
        region.extent.depth  = (isSrc3D || isDst3D) ? srcDepth : 1;

        // Transition both images to the required transfer layouts via the access tracker.
        CommandBufferAccess access;
        access.onImageTransferRead(VK_IMAGE_ASPECT_COLOR_BIT, srcImage);
        access.onImageTransferWrite(dstLevelGL, 1, region.dstSubresource.baseArrayLayer,
                                    region.dstSubresource.layerCount, VK_IMAGE_ASPECT_COLOR_BIT,
                                    dstImage);

        CommandBuffer *commandBuffer;
        ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(access, &commandBuffer));

        ASSERT(srcImage->valid() && dstImage->valid());
        ASSERT(srcImage->getCurrentLayout() == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
        ASSERT(dstImage->getCurrentLayout() == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

        commandBuffer->copyImage(srcImage->getImage(), srcImage->getCurrentLayout(),
                                 dstImage->getImage(), dstImage->getCurrentLayout(), 1, &region);
    }
    else if (!sourceVkFormat.intendedFormat().isBlock && !destVkFormat.intendedFormat().isBlock)
    {
        // The source and destination image formats may be using a fallback in the case of RGB
        // images.  A compute shader is used in such a case to perform the copy.
        UtilsVk &utilsVk = contextVk->getUtils();

        UtilsVk::CopyImageBitsParameters params;
        params.srcOffset[0]   = srcX;
        params.srcOffset[1]   = srcY;
        params.srcOffset[2]   = srcZ;
        params.srcLevel       = srcLevelGL;
        params.dstOffset[0]   = dstX;
        params.dstOffset[1]   = dstY;
        params.dstOffset[2]   = dstZ;
        params.dstLevel       = dstLevelGL;
        params.copyExtents[0] = srcWidth;
        params.copyExtents[1] = srcHeight;
        params.copyExtents[2] = srcDepth;

        ANGLE_TRY(utilsVk.copyImageBits(contextVk, dstImage, srcImage, params));
    }
    else
    {
        // No support for emulated compressed formats.
        UNIMPLEMENTED();
        ANGLE_VK_CHECK(contextVk, false, VK_ERROR_FEATURE_NOT_PRESENT);
    }

    return angle::Result::Continue;
}
5227 
// Generates mips in levels (baseLevel, maxLevel] by repeatedly blitting each level into the next.
// Barriers are managed manually here because each level needs its own layout transitions; the
// tracked image state is patched up at the end.
angle::Result ImageHelper::generateMipmapsWithBlit(ContextVk *contextVk,
                                                   LevelIndex baseLevel,
                                                   LevelIndex maxLevel)
{
    CommandBufferAccess access;
    gl::LevelIndex baseLevelGL = toGLLevel(baseLevel);
    access.onImageTransferWrite(baseLevelGL + 1, maxLevel.get(), 0, mLayerCount,
                                VK_IMAGE_ASPECT_COLOR_BIT, this);

    CommandBuffer *commandBuffer;
    ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(access, &commandBuffer));

    // We are able to use blitImage since the image format we are using supports it.
    int32_t mipWidth  = mExtents.width;
    int32_t mipHeight = mExtents.height;
    int32_t mipDepth  = mExtents.depth;

    // Manually manage the image memory barrier because it uses a lot more parameters than our
    // usual one.
    VkImageMemoryBarrier barrier            = {};
    barrier.sType                           = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.image                           = mImage.getHandle();
    barrier.srcQueueFamilyIndex             = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex             = VK_QUEUE_FAMILY_IGNORED;
    barrier.subresourceRange.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
    barrier.subresourceRange.baseArrayLayer = 0;
    barrier.subresourceRange.layerCount     = mLayerCount;
    barrier.subresourceRange.levelCount     = 1;

    const VkFilter filter = gl_vk::GetFilter(CalculateGenerateMipmapFilter(contextVk, getFormat()));

    for (LevelIndex mipLevel(1); mipLevel <= LevelIndex(mLevelCount); ++mipLevel)
    {
        int32_t nextMipWidth  = std::max<int32_t>(1, mipWidth >> 1);
        int32_t nextMipHeight = std::max<int32_t>(1, mipHeight >> 1);
        int32_t nextMipDepth  = std::max<int32_t>(1, mipDepth >> 1);

        if (mipLevel > baseLevel && mipLevel <= maxLevel)
        {
            // Transition the previous level to TRANSFER_SRC so it can be blitted into this level.
            barrier.subresourceRange.baseMipLevel = mipLevel.get() - 1;
            barrier.oldLayout                     = getCurrentLayout();
            barrier.newLayout                     = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
            barrier.srcAccessMask                 = VK_ACCESS_TRANSFER_WRITE_BIT;
            barrier.dstAccessMask                 = VK_ACCESS_TRANSFER_READ_BIT;

            // We can do it for all layers at once.
            commandBuffer->imageBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
                                        VK_PIPELINE_STAGE_TRANSFER_BIT, barrier);
            VkImageBlit blit                   = {};
            blit.srcOffsets[0]                 = {0, 0, 0};
            blit.srcOffsets[1]                 = {mipWidth, mipHeight, mipDepth};
            blit.srcSubresource.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
            blit.srcSubresource.mipLevel       = mipLevel.get() - 1;
            blit.srcSubresource.baseArrayLayer = 0;
            blit.srcSubresource.layerCount     = mLayerCount;
            blit.dstOffsets[0]                 = {0, 0, 0};
            blit.dstOffsets[1]                 = {nextMipWidth, nextMipHeight, nextMipDepth};
            blit.dstSubresource.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
            blit.dstSubresource.mipLevel       = mipLevel.get();
            blit.dstSubresource.baseArrayLayer = 0;
            blit.dstSubresource.layerCount     = mLayerCount;

            commandBuffer->blitImage(mImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, mImage,
                                     VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit, filter);
        }
        mipWidth  = nextMipWidth;
        mipHeight = nextMipHeight;
        mipDepth  = nextMipDepth;
    }

    // Transition all mip level to the same layout so we can declare our whole image layout to one
    // ImageLayout. FragmentShaderReadOnly is picked here since this is the most reasonable usage
    // after glGenerateMipmap call.
    barrier.oldLayout     = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    barrier.newLayout     = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    if (baseLevel.get() > 0)
    {
        // [0:baseLevel-1] from TRANSFER_DST to SHADER_READ
        barrier.subresourceRange.baseMipLevel = 0;
        barrier.subresourceRange.levelCount   = baseLevel.get();
        commandBuffer->imageBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
                                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, barrier);
    }
    // [maxLevel:mLevelCount-1] from TRANSFER_DST to SHADER_READ
    ASSERT(mLevelCount > maxLevel.get());
    barrier.subresourceRange.baseMipLevel = maxLevel.get();
    barrier.subresourceRange.levelCount   = mLevelCount - maxLevel.get();
    commandBuffer->imageBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
                                VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, barrier);
    // [baseLevel:maxLevel-1] from TRANSFER_SRC to SHADER_READ
    barrier.oldLayout                     = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
    barrier.subresourceRange.baseMipLevel = baseLevel.get();
    barrier.subresourceRange.levelCount   = maxLevel.get() - baseLevel.get();
    commandBuffer->imageBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
                                VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, barrier);

    // This is just changing the internal state of the image helper so that the next call
    // to changeLayout will use this layout as the "oldLayout" argument.
    // mLastNonShaderReadOnlyLayout is used to ensure previous write are made visible to reads,
    // since the only write here is transfer, hence mLastNonShaderReadOnlyLayout is set to
    // ImageLayout::TransferDst.
    mLastNonShaderReadOnlyLayout = ImageLayout::TransferDst;
    mCurrentShaderReadStageMask  = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    mCurrentLayout               = ImageLayout::FragmentShaderReadOnly;

    return angle::Result::Continue;
}
5336 
// Resolves a multisampled region of this image into |dest| with vkCmdResolveImage.  This image
// must be in the TransferSrc layout; |dest| is expected to be in TransferDst.
void ImageHelper::resolve(ImageHelper *dest,
                          const VkImageResolve &region,
                          CommandBuffer *commandBuffer)
{
    ASSERT(mCurrentLayout == ImageLayout::TransferSrc);
    commandBuffer->resolveImage(getImage(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dest->getImage(),
                                VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
}
5345 
removeSingleSubresourceStagedUpdates(ContextVk * contextVk,gl::LevelIndex levelIndexGL,uint32_t layerIndex,uint32_t layerCount)5346 void ImageHelper::removeSingleSubresourceStagedUpdates(ContextVk *contextVk,
5347                                                        gl::LevelIndex levelIndexGL,
5348                                                        uint32_t layerIndex,
5349                                                        uint32_t layerCount)
5350 {
5351     mCurrentSingleClearValue.reset();
5352 
5353     // Find any staged updates for this index and remove them from the pending list.
5354     std::vector<SubresourceUpdate> *levelUpdates = getLevelUpdates(levelIndexGL);
5355     if (levelUpdates == nullptr)
5356     {
5357         return;
5358     }
5359 
5360     for (size_t index = 0; index < levelUpdates->size();)
5361     {
5362         auto update = levelUpdates->begin() + index;
5363         if (update->isUpdateToLayers(layerIndex, layerCount))
5364         {
5365             update->release(contextVk->getRenderer());
5366             levelUpdates->erase(update);
5367         }
5368         else
5369         {
5370             index++;
5371         }
5372     }
5373 }
5374 
removeStagedUpdates(Context * context,gl::LevelIndex levelGLStart,gl::LevelIndex levelGLEnd)5375 void ImageHelper::removeStagedUpdates(Context *context,
5376                                       gl::LevelIndex levelGLStart,
5377                                       gl::LevelIndex levelGLEnd)
5378 {
5379     ASSERT(validateSubresourceUpdateImageRefsConsistent());
5380 
5381     // Remove all updates to levels [start, end].
5382     for (gl::LevelIndex level = levelGLStart; level <= levelGLEnd; ++level)
5383     {
5384         std::vector<SubresourceUpdate> *levelUpdates = getLevelUpdates(level);
5385         if (levelUpdates == nullptr)
5386         {
5387             ASSERT(static_cast<size_t>(level.get()) >= mSubresourceUpdates.size());
5388             return;
5389         }
5390 
5391         for (SubresourceUpdate &update : *levelUpdates)
5392         {
5393             update.release(context->getRenderer());
5394         }
5395 
5396         levelUpdates->clear();
5397     }
5398 
5399     ASSERT(validateSubresourceUpdateImageRefsConsistent());
5400 }
5401 
// Converts |pixels| into the image's actual storage format inside a staging
// buffer and records the resulting buffer->image copy region(s) as pending
// subresource updates.  No GPU commands are recorded here; the staged updates
// are consumed when the image's updates are flushed.
//
// inputRowPitch/inputDepthPitch/inputSkipBytes describe the layout of the
// client data in |pixels| (see CalculateBufferInfo).  |stagingBufferOverride|,
// when non-null, supplies the staging memory instead of mStagingBuffer.
angle::Result ImageHelper::stageSubresourceUpdateImpl(ContextVk *contextVk,
                                                      const gl::ImageIndex &index,
                                                      const gl::Extents &glExtents,
                                                      const gl::Offset &offset,
                                                      const gl::InternalFormat &formatInfo,
                                                      const gl::PixelUnpackState &unpack,
                                                      DynamicBuffer *stagingBufferOverride,
                                                      GLenum type,
                                                      const uint8_t *pixels,
                                                      const Format &vkFormat,
                                                      const GLuint inputRowPitch,
                                                      const GLuint inputDepthPitch,
                                                      const GLuint inputSkipBytes)
{
    const angle::Format &storageFormat = vkFormat.actualImageFormat();

    // Layout of the converted data inside the staging allocation.
    size_t outputRowPitch;
    size_t outputDepthPitch;
    size_t stencilAllocationSize = 0;
    uint32_t bufferRowLength;
    uint32_t bufferImageHeight;
    size_t allocationSize;

    LoadImageFunctionInfo loadFunctionInfo = vkFormat.textureLoadFunctions(type);
    LoadImageFunction stencilLoadFunction  = nullptr;

    if (storageFormat.isBlock)
    {
        // Compressed formats: pitches and total size follow the
        // block-compressed layout; each computation is overflow-checked.
        const gl::InternalFormat &storageFormatInfo = vkFormat.getInternalFormatInfo(type);
        GLuint rowPitch;
        GLuint depthPitch;
        GLuint totalSize;

        ANGLE_VK_CHECK_MATH(contextVk, storageFormatInfo.computeCompressedImageSize(
                                           gl::Extents(glExtents.width, 1, 1), &rowPitch));
        ANGLE_VK_CHECK_MATH(contextVk,
                            storageFormatInfo.computeCompressedImageSize(
                                gl::Extents(glExtents.width, glExtents.height, 1), &depthPitch));

        ANGLE_VK_CHECK_MATH(contextVk,
                            storageFormatInfo.computeCompressedImageSize(glExtents, &totalSize));

        outputRowPitch   = rowPitch;
        outputDepthPitch = depthPitch;
        allocationSize   = totalSize;

        ANGLE_VK_CHECK_MATH(
            contextVk, storageFormatInfo.computeBufferRowLength(glExtents.width, &bufferRowLength));
        ANGLE_VK_CHECK_MATH(contextVk, storageFormatInfo.computeBufferImageHeight(
                                           glExtents.height, &bufferImageHeight));
    }
    else
    {
        ASSERT(storageFormat.pixelBytes != 0);

        // D24S8: the generic load function handles depth; stencil is extracted
        // separately below via stencilLoadFunction.
        if (storageFormat.id == angle::FormatID::D24_UNORM_S8_UINT)
        {
            stencilLoadFunction = angle::LoadX24S8ToS8;
        }
        if (storageFormat.id == angle::FormatID::D32_FLOAT_S8X24_UINT)
        {
            // If depth is D32FLOAT_S8, we must pack D32F tightly (no stencil) for CopyBufferToImage
            outputRowPitch = sizeof(float) * glExtents.width;

            // The generic load functions don't handle tightly packing D32FS8 to D32F & S8 so call
            // special case load functions.
            switch (type)
            {
                case GL_UNSIGNED_INT:
                    loadFunctionInfo.loadFunction = angle::LoadD32ToD32F;
                    stencilLoadFunction           = nullptr;
                    break;
                case GL_DEPTH32F_STENCIL8:
                case GL_FLOAT_32_UNSIGNED_INT_24_8_REV:
                    loadFunctionInfo.loadFunction = angle::LoadD32FS8X24ToD32F;
                    stencilLoadFunction           = angle::LoadX32S8ToS8;
                    break;
                case GL_UNSIGNED_INT_24_8_OES:
                    loadFunctionInfo.loadFunction = angle::LoadD24S8ToD32F;
                    stencilLoadFunction           = angle::LoadX24S8ToS8;
                    break;
                default:
                    UNREACHABLE();
            }
        }
        else
        {
            // Uncompressed, non-D32FS8 formats are tightly packed per row.
            outputRowPitch = storageFormat.pixelBytes * glExtents.width;
        }
        outputDepthPitch = outputRowPitch * glExtents.height;

        bufferRowLength   = glExtents.width;
        bufferImageHeight = glExtents.height;

        allocationSize = outputDepthPitch * glExtents.depth;

        // Note: because the LoadImageFunctionInfo functions are limited to copying a single
        // component, we have to special case packed depth/stencil use and send the stencil as a
        // separate chunk.
        if (storageFormat.depthBits > 0 && storageFormat.stencilBits > 0 &&
            formatInfo.depthBits > 0 && formatInfo.stencilBits > 0)
        {
            // Note: Stencil is always one byte
            stencilAllocationSize = glExtents.width * glExtents.height * glExtents.depth;
            allocationSize += stencilAllocationSize;
        }
    }

    VkBuffer bufferHandle = VK_NULL_HANDLE;

    uint8_t *stagingPointer    = nullptr;
    VkDeviceSize stagingOffset = 0;
    // If caller has provided a staging buffer, use it.
    DynamicBuffer *stagingBuffer = stagingBufferOverride ? stagingBufferOverride : &mStagingBuffer;
    // NOTE(review): alignment is always taken from mStagingBuffer, even when
    // the override buffer is used — presumably both share the same alignment
    // requirement; confirm.
    size_t alignment             = mStagingBuffer.getAlignment();
    ANGLE_TRY(stagingBuffer->allocateWithAlignment(contextVk, allocationSize, alignment,
                                                   &stagingPointer, &bufferHandle, &stagingOffset,
                                                   nullptr));
    BufferHelper *currentBuffer = stagingBuffer->getCurrentBuffer();

    // Skip the leading bytes implied by the unpack state.
    const uint8_t *source = pixels + static_cast<ptrdiff_t>(inputSkipBytes);

    // Convert/copy the client data into the staging memory.
    loadFunctionInfo.loadFunction(glExtents.width, glExtents.height, glExtents.depth, source,
                                  inputRowPitch, inputDepthPitch, stagingPointer, outputRowPitch,
                                  outputDepthPitch);

    // YUV formats need special handling.
    if (vkFormat.actualImageFormat().isYUV)
    {
        gl::YuvFormatInfo yuvInfo(formatInfo.internalFormat, glExtents);

        constexpr VkImageAspectFlagBits kPlaneAspectFlags[3] = {
            VK_IMAGE_ASPECT_PLANE_0_BIT, VK_IMAGE_ASPECT_PLANE_1_BIT, VK_IMAGE_ASPECT_PLANE_2_BIT};

        // We only support mip level 0 and layerCount of 1 for YUV formats.
        ASSERT(index.getLevelIndex() == 0);
        ASSERT(index.getLayerCount() == 1);

        // Stage one copy per plane, each targeting its own plane aspect bit at
        // the plane's offset within the staging allocation.
        for (uint32_t plane = 0; plane < yuvInfo.planeCount; plane++)
        {
            VkBufferImageCopy copy           = {};
            copy.bufferOffset                = stagingOffset + yuvInfo.planeOffset[plane];
            copy.bufferRowLength             = 0;
            copy.bufferImageHeight           = 0;
            copy.imageSubresource.mipLevel   = 0;
            copy.imageSubresource.layerCount = 1;
            gl_vk::GetOffset(offset, &copy.imageOffset);
            gl_vk::GetExtent(yuvInfo.planeExtent[plane], &copy.imageExtent);
            copy.imageSubresource.baseArrayLayer = 0;
            copy.imageSubresource.aspectMask     = kPlaneAspectFlags[plane];
            appendSubresourceUpdate(gl::LevelIndex(0), SubresourceUpdate(currentBuffer, copy));
        }

        return angle::Result::Continue;
    }

    VkBufferImageCopy copy         = {};
    VkImageAspectFlags aspectFlags = GetFormatAspectFlags(vkFormat.actualImageFormat());

    copy.bufferOffset      = stagingOffset;
    copy.bufferRowLength   = bufferRowLength;
    copy.bufferImageHeight = bufferImageHeight;

    gl::LevelIndex updateLevelGL(index.getLevelIndex());
    copy.imageSubresource.mipLevel   = updateLevelGL.get();
    copy.imageSubresource.layerCount = index.getLayerCount();

    gl_vk::GetOffset(offset, &copy.imageOffset);
    gl_vk::GetExtent(glExtents, &copy.imageExtent);

    if (gl::IsArrayTextureType(index.getType()))
    {
        // For array textures, the z component of the offset selects the layer;
        // the image copy itself is a single slice.
        copy.imageSubresource.baseArrayLayer = offset.z;
        copy.imageOffset.z                   = 0;
        copy.imageExtent.depth               = 1;
    }
    else
    {
        copy.imageSubresource.baseArrayLayer = index.hasLayer() ? index.getLayerIndex() : 0;
    }

    if (stencilAllocationSize > 0)
    {
        // Note: Stencil is always one byte
        ASSERT((aspectFlags & VK_IMAGE_ASPECT_STENCIL_BIT) != 0);

        // Skip over depth data.
        stagingPointer += outputDepthPitch * glExtents.depth;
        stagingOffset += outputDepthPitch * glExtents.depth;

        // recompute pitch for stencil data
        outputRowPitch   = glExtents.width;
        outputDepthPitch = outputRowPitch * glExtents.height;

        // Extract stencil into its own tightly-packed chunk and stage it as a
        // separate stencil-aspect copy.
        ASSERT(stencilLoadFunction != nullptr);
        stencilLoadFunction(glExtents.width, glExtents.height, glExtents.depth, source,
                            inputRowPitch, inputDepthPitch, stagingPointer, outputRowPitch,
                            outputDepthPitch);

        VkBufferImageCopy stencilCopy = {};

        stencilCopy.bufferOffset                    = stagingOffset;
        stencilCopy.bufferRowLength                 = bufferRowLength;
        stencilCopy.bufferImageHeight               = bufferImageHeight;
        stencilCopy.imageSubresource.mipLevel       = copy.imageSubresource.mipLevel;
        stencilCopy.imageSubresource.baseArrayLayer = copy.imageSubresource.baseArrayLayer;
        stencilCopy.imageSubresource.layerCount     = copy.imageSubresource.layerCount;
        stencilCopy.imageOffset                     = copy.imageOffset;
        stencilCopy.imageExtent                     = copy.imageExtent;
        stencilCopy.imageSubresource.aspectMask     = VK_IMAGE_ASPECT_STENCIL_BIT;
        appendSubresourceUpdate(updateLevelGL, SubresourceUpdate(currentBuffer, stencilCopy));

        // Stencil has been staged above; remove it from the main copy's aspects.
        aspectFlags &= ~VK_IMAGE_ASPECT_STENCIL_BIT;
    }

    if (HasBothDepthAndStencilAspects(aspectFlags))
    {
        // We still have both depth and stencil aspect bits set. That means we have a destination
        // buffer that is packed depth stencil and that the application is only loading one aspect.
        // Figure out which aspect the user is touching and remove the unused aspect bit.
        if (formatInfo.stencilBits > 0)
        {
            aspectFlags &= ~VK_IMAGE_ASPECT_DEPTH_BIT;
        }
        else
        {
            aspectFlags &= ~VK_IMAGE_ASPECT_STENCIL_BIT;
        }
    }

    // Stage the main copy only if some aspect remains to be uploaded.
    if (aspectFlags)
    {
        copy.imageSubresource.aspectMask = aspectFlags;
        appendSubresourceUpdate(updateLevelGL, SubresourceUpdate(currentBuffer, copy));
    }

    return angle::Result::Continue;
}
5640 
// Computes the layout of client-provided pixel data for an upload:
// *inputRowPitch and *inputDepthPitch receive the source strides, and
// *inputSkipBytes the byte offset to the first texel implied by the unpack
// state.  Fails (via ANGLE_VK_CHECK_MATH) if any size computation overflows.
angle::Result ImageHelper::CalculateBufferInfo(ContextVk *contextVk,
                                               const gl::Extents &glExtents,
                                               const gl::InternalFormat &formatInfo,
                                               const gl::PixelUnpackState &unpack,
                                               GLenum type,
                                               bool is3D,
                                               GLuint *inputRowPitch,
                                               GLuint *inputDepthPitch,
                                               GLuint *inputSkipBytes)
{
    // YUV formats need special handling.
    if (gl::IsYuvFormat(formatInfo.internalFormat))
    {
        gl::YuvFormatInfo yuvInfo(formatInfo.internalFormat, glExtents);

        // row pitch = Y plane row pitch
        *inputRowPitch = yuvInfo.planePitch[0];
        // depth pitch = Y plane size + chroma plane size
        *inputDepthPitch = yuvInfo.planeSize[0] + yuvInfo.planeSize[1] + yuvInfo.planeSize[2];
        // No leading skip bytes for YUV data.
        *inputSkipBytes  = 0;

        return angle::Result::Continue;
    }

    // Row pitch accounts for the unpack alignment and row length.
    ANGLE_VK_CHECK_MATH(contextVk,
                        formatInfo.computeRowPitch(type, glExtents.width, unpack.alignment,
                                                   unpack.rowLength, inputRowPitch));

    // Depth pitch accounts for the unpack image height (3D/array uploads).
    ANGLE_VK_CHECK_MATH(contextVk,
                        formatInfo.computeDepthPitch(glExtents.height, unpack.imageHeight,
                                                     *inputRowPitch, inputDepthPitch));

    // Skip bytes derive from the unpack skip state and the pitches above.
    ANGLE_VK_CHECK_MATH(
        contextVk, formatInfo.computeSkipBytes(type, *inputRowPitch, *inputDepthPitch, unpack, is3D,
                                               inputSkipBytes));

    return angle::Result::Continue;
}
5679 
hasImmutableSampler() const5680 bool ImageHelper::hasImmutableSampler() const
5681 {
5682     return mExternalFormat != 0 || mFormat->actualImageFormat().isYUV;
5683 }
5684 
onWrite(gl::LevelIndex levelStart,uint32_t levelCount,uint32_t layerStart,uint32_t layerCount,VkImageAspectFlags aspectFlags)5685 void ImageHelper::onWrite(gl::LevelIndex levelStart,
5686                           uint32_t levelCount,
5687                           uint32_t layerStart,
5688                           uint32_t layerCount,
5689                           VkImageAspectFlags aspectFlags)
5690 {
5691     mCurrentSingleClearValue.reset();
5692 
5693     // Mark contents of the given subresource as defined.
5694     setContentDefined(toVkLevel(levelStart), levelCount, layerStart, layerCount, aspectFlags);
5695 }
5696 
hasSubresourceDefinedContent(gl::LevelIndex level,uint32_t layerIndex,uint32_t layerCount) const5697 bool ImageHelper::hasSubresourceDefinedContent(gl::LevelIndex level,
5698                                                uint32_t layerIndex,
5699                                                uint32_t layerCount) const
5700 {
5701     if (layerIndex >= kMaxContentDefinedLayerCount)
5702     {
5703         return true;
5704     }
5705 
5706     uint8_t layerRangeBits =
5707         GetContentDefinedLayerRangeBits(layerIndex, layerCount, kMaxContentDefinedLayerCount);
5708     return (getLevelContentDefined(toVkLevel(level)) & LevelContentDefinedMask(layerRangeBits))
5709         .any();
5710 }
5711 
hasSubresourceDefinedStencilContent(gl::LevelIndex level,uint32_t layerIndex,uint32_t layerCount) const5712 bool ImageHelper::hasSubresourceDefinedStencilContent(gl::LevelIndex level,
5713                                                       uint32_t layerIndex,
5714                                                       uint32_t layerCount) const
5715 {
5716     if (layerIndex >= kMaxContentDefinedLayerCount)
5717     {
5718         return true;
5719     }
5720 
5721     uint8_t layerRangeBits =
5722         GetContentDefinedLayerRangeBits(layerIndex, layerCount, kMaxContentDefinedLayerCount);
5723     return (getLevelStencilContentDefined(toVkLevel(level)) &
5724             LevelContentDefinedMask(layerRangeBits))
5725         .any();
5726 }
5727 
invalidateSubresourceContent(ContextVk * contextVk,gl::LevelIndex level,uint32_t layerIndex,uint32_t layerCount)5728 void ImageHelper::invalidateSubresourceContent(ContextVk *contextVk,
5729                                                gl::LevelIndex level,
5730                                                uint32_t layerIndex,
5731                                                uint32_t layerCount)
5732 {
5733     if (layerIndex < kMaxContentDefinedLayerCount)
5734     {
5735         uint8_t layerRangeBits =
5736             GetContentDefinedLayerRangeBits(layerIndex, layerCount, kMaxContentDefinedLayerCount);
5737         getLevelContentDefined(toVkLevel(level)) &= static_cast<uint8_t>(~layerRangeBits);
5738     }
5739     else
5740     {
5741         ANGLE_PERF_WARNING(
5742             contextVk->getDebug(), GL_DEBUG_SEVERITY_LOW,
5743             "glInvalidateFramebuffer (color or depth) ineffective on attachments with layer >= 8");
5744     }
5745 }
5746 
invalidateSubresourceStencilContent(ContextVk * contextVk,gl::LevelIndex level,uint32_t layerIndex,uint32_t layerCount)5747 void ImageHelper::invalidateSubresourceStencilContent(ContextVk *contextVk,
5748                                                       gl::LevelIndex level,
5749                                                       uint32_t layerIndex,
5750                                                       uint32_t layerCount)
5751 {
5752     if (layerIndex < kMaxContentDefinedLayerCount)
5753     {
5754         uint8_t layerRangeBits =
5755             GetContentDefinedLayerRangeBits(layerIndex, layerCount, kMaxContentDefinedLayerCount);
5756         getLevelStencilContentDefined(toVkLevel(level)) &= static_cast<uint8_t>(~layerRangeBits);
5757     }
5758     else
5759     {
5760         ANGLE_PERF_WARNING(
5761             contextVk->getDebug(), GL_DEBUG_SEVERITY_LOW,
5762             "glInvalidateFramebuffer (stencil) ineffective on attachments with layer >= 8");
5763     }
5764 }
5765 
restoreSubresourceContent(gl::LevelIndex level,uint32_t layerIndex,uint32_t layerCount)5766 void ImageHelper::restoreSubresourceContent(gl::LevelIndex level,
5767                                             uint32_t layerIndex,
5768                                             uint32_t layerCount)
5769 {
5770     if (layerIndex < kMaxContentDefinedLayerCount)
5771     {
5772         uint8_t layerRangeBits =
5773             GetContentDefinedLayerRangeBits(layerIndex, layerCount, kMaxContentDefinedLayerCount);
5774         getLevelContentDefined(toVkLevel(level)) |= layerRangeBits;
5775     }
5776 }
5777 
restoreSubresourceStencilContent(gl::LevelIndex level,uint32_t layerIndex,uint32_t layerCount)5778 void ImageHelper::restoreSubresourceStencilContent(gl::LevelIndex level,
5779                                                    uint32_t layerIndex,
5780                                                    uint32_t layerCount)
5781 {
5782     if (layerIndex < kMaxContentDefinedLayerCount)
5783     {
5784         uint8_t layerRangeBits =
5785             GetContentDefinedLayerRangeBits(layerIndex, layerCount, kMaxContentDefinedLayerCount);
5786         getLevelStencilContentDefined(toVkLevel(level)) |= layerRangeBits;
5787     }
5788 }
5789 
stageSubresourceUpdate(ContextVk * contextVk,const gl::ImageIndex & index,const gl::Extents & glExtents,const gl::Offset & offset,const gl::InternalFormat & formatInfo,const gl::PixelUnpackState & unpack,DynamicBuffer * stagingBufferOverride,GLenum type,const uint8_t * pixels,const Format & vkFormat)5790 angle::Result ImageHelper::stageSubresourceUpdate(ContextVk *contextVk,
5791                                                   const gl::ImageIndex &index,
5792                                                   const gl::Extents &glExtents,
5793                                                   const gl::Offset &offset,
5794                                                   const gl::InternalFormat &formatInfo,
5795                                                   const gl::PixelUnpackState &unpack,
5796                                                   DynamicBuffer *stagingBufferOverride,
5797                                                   GLenum type,
5798                                                   const uint8_t *pixels,
5799                                                   const Format &vkFormat)
5800 {
5801     GLuint inputRowPitch   = 0;
5802     GLuint inputDepthPitch = 0;
5803     GLuint inputSkipBytes  = 0;
5804     ANGLE_TRY(CalculateBufferInfo(contextVk, glExtents, formatInfo, unpack, type, index.usesTex3D(),
5805                                   &inputRowPitch, &inputDepthPitch, &inputSkipBytes));
5806 
5807     ANGLE_TRY(stageSubresourceUpdateImpl(contextVk, index, glExtents, offset, formatInfo, unpack,
5808                                          stagingBufferOverride, type, pixels, vkFormat,
5809                                          inputRowPitch, inputDepthPitch, inputSkipBytes));
5810 
5811     return angle::Result::Continue;
5812 }
5813 
stageSubresourceUpdateAndGetData(ContextVk * contextVk,size_t allocationSize,const gl::ImageIndex & imageIndex,const gl::Extents & glExtents,const gl::Offset & offset,uint8_t ** destData,DynamicBuffer * stagingBufferOverride)5814 angle::Result ImageHelper::stageSubresourceUpdateAndGetData(ContextVk *contextVk,
5815                                                             size_t allocationSize,
5816                                                             const gl::ImageIndex &imageIndex,
5817                                                             const gl::Extents &glExtents,
5818                                                             const gl::Offset &offset,
5819                                                             uint8_t **destData,
5820                                                             DynamicBuffer *stagingBufferOverride)
5821 {
5822     VkBuffer bufferHandle;
5823     VkDeviceSize stagingOffset = 0;
5824 
5825     DynamicBuffer *stagingBuffer = stagingBufferOverride ? stagingBufferOverride : &mStagingBuffer;
5826     size_t alignment             = mStagingBuffer.getAlignment();
5827     ANGLE_TRY(stagingBuffer->allocateWithAlignment(contextVk, allocationSize, alignment, destData,
5828                                                    &bufferHandle, &stagingOffset, nullptr));
5829 
5830     gl::LevelIndex updateLevelGL(imageIndex.getLevelIndex());
5831 
5832     VkBufferImageCopy copy               = {};
5833     copy.bufferOffset                    = stagingOffset;
5834     copy.bufferRowLength                 = glExtents.width;
5835     copy.bufferImageHeight               = glExtents.height;
5836     copy.imageSubresource.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
5837     copy.imageSubresource.mipLevel       = updateLevelGL.get();
5838     copy.imageSubresource.baseArrayLayer = imageIndex.hasLayer() ? imageIndex.getLayerIndex() : 0;
5839     copy.imageSubresource.layerCount     = imageIndex.getLayerCount();
5840 
5841     // Note: Only support color now
5842     ASSERT((mFormat == nullptr) || (getAspectFlags() == VK_IMAGE_ASPECT_COLOR_BIT));
5843 
5844     gl_vk::GetOffset(offset, &copy.imageOffset);
5845     gl_vk::GetExtent(glExtents, &copy.imageExtent);
5846 
5847     appendSubresourceUpdate(updateLevelGL,
5848                             SubresourceUpdate(stagingBuffer->getCurrentBuffer(), copy));
5849 
5850     return angle::Result::Continue;
5851 }
5852 
// Reads back a region of the read framebuffer on the CPU and stages the
// resulting pixels as a subresource update for this image.  The source area is
// clipped to the read image's extents; an empty clipped area is a no-op.
angle::Result ImageHelper::stageSubresourceUpdateFromFramebuffer(
    const gl::Context *context,
    const gl::ImageIndex &index,
    const gl::Rectangle &sourceArea,
    const gl::Offset &dstOffset,
    const gl::Extents &dstExtent,
    const gl::InternalFormat &formatInfo,
    FramebufferVk *framebufferVk,
    DynamicBuffer *stagingBufferOverride)
{
    ContextVk *contextVk = GetImpl(context);

    // If the extents and offset is outside the source image, we need to clip.
    gl::Rectangle clippedRectangle;
    const gl::Extents readExtents = framebufferVk->getReadImageExtents();
    if (!ClipRectangle(sourceArea, gl::Rectangle(0, 0, readExtents.width, readExtents.height),
                       &clippedRectangle))
    {
        // Empty source area, nothing to do.
        return angle::Result::Continue;
    }

    // With a flipped viewport, mirror the readback rectangle in Y so it still
    // addresses the intended rows.
    bool isViewportFlipEnabled = contextVk->isViewportFlipEnabledForDrawFBO();
    if (isViewportFlipEnabled)
    {
        clippedRectangle.y = readExtents.height - clippedRectangle.y - clippedRectangle.height;
    }

    // 1- obtain a buffer handle to copy to
    RendererVk *renderer = contextVk->getRenderer();

    const Format &vkFormat             = renderer->getFormat(formatInfo.sizedInternalFormat);
    const angle::Format &storageFormat = vkFormat.actualImageFormat();
    LoadImageFunctionInfo loadFunction = vkFormat.textureLoadFunctions(formatInfo.type);

    size_t outputRowPitch   = storageFormat.pixelBytes * clippedRectangle.width;
    size_t outputDepthPitch = outputRowPitch * clippedRectangle.height;

    VkBuffer bufferHandle = VK_NULL_HANDLE;

    uint8_t *stagingPointer    = nullptr;
    VkDeviceSize stagingOffset = 0;

    // The destination is only one layer deep.
    size_t allocationSize        = outputDepthPitch;
    DynamicBuffer *stagingBuffer = stagingBufferOverride ? stagingBufferOverride : &mStagingBuffer;
    // NOTE(review): alignment always comes from mStagingBuffer even when the
    // override buffer is used — presumably the requirement is shared; confirm.
    size_t alignment             = mStagingBuffer.getAlignment();
    ANGLE_TRY(stagingBuffer->allocateWithAlignment(contextVk, allocationSize, alignment,
                                                   &stagingPointer, &bufferHandle, &stagingOffset,
                                                   nullptr));
    BufferHelper *currentBuffer = stagingBuffer->getCurrentBuffer();

    const angle::Format &copyFormat =
        GetFormatFromFormatType(formatInfo.internalFormat, formatInfo.type);
    PackPixelsParams params(clippedRectangle, copyFormat, static_cast<GLuint>(outputRowPitch),
                            isViewportFlipEnabled, nullptr, 0);

    RenderTargetVk *readRenderTarget = framebufferVk->getColorReadRenderTarget();

    // 2- copy the source image region to the pixel buffer using a cpu readback
    if (loadFunction.requiresConversion)
    {
        // When a conversion is required, we need to use the loadFunction to read from a temporary
        // buffer instead so its an even slower path.
        size_t bufferSize =
            storageFormat.pixelBytes * clippedRectangle.width * clippedRectangle.height;
        angle::MemoryBuffer *memoryBuffer = nullptr;
        ANGLE_VK_CHECK_ALLOC(contextVk, context->getScratchBuffer(bufferSize, &memoryBuffer));

        // Read into the scratch buffer
        ANGLE_TRY(framebufferVk->readPixelsImpl(contextVk, clippedRectangle, params,
                                                VK_IMAGE_ASPECT_COLOR_BIT, readRenderTarget,
                                                memoryBuffer->data()));

        // Load from scratch buffer to our pixel buffer
        loadFunction.loadFunction(clippedRectangle.width, clippedRectangle.height, 1,
                                  memoryBuffer->data(), outputRowPitch, 0, stagingPointer,
                                  outputRowPitch, 0);
    }
    else
    {
        // We read directly from the framebuffer into our pixel buffer.
        ANGLE_TRY(framebufferVk->readPixelsImpl(contextVk, clippedRectangle, params,
                                                VK_IMAGE_ASPECT_COLOR_BIT, readRenderTarget,
                                                stagingPointer));
    }

    gl::LevelIndex updateLevelGL(index.getLevelIndex());

    // 3- enqueue the destination image subresource update
    VkBufferImageCopy copyToImage               = {};
    copyToImage.bufferOffset                    = static_cast<VkDeviceSize>(stagingOffset);
    copyToImage.bufferRowLength                 = 0;  // Tightly packed data can be specified as 0.
    copyToImage.bufferImageHeight               = clippedRectangle.height;
    copyToImage.imageSubresource.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
    copyToImage.imageSubresource.mipLevel       = updateLevelGL.get();
    copyToImage.imageSubresource.baseArrayLayer = index.hasLayer() ? index.getLayerIndex() : 0;
    copyToImage.imageSubresource.layerCount     = index.getLayerCount();
    gl_vk::GetOffset(dstOffset, &copyToImage.imageOffset);
    gl_vk::GetExtent(dstExtent, &copyToImage.imageExtent);

    // Record the staged update; it is applied when updates are flushed.
    appendSubresourceUpdate(updateLevelGL, SubresourceUpdate(currentBuffer, copyToImage));
    return angle::Result::Continue;
}
5958 
stageSubresourceUpdateFromImage(RefCounted<ImageHelper> * image,const gl::ImageIndex & index,LevelIndex srcMipLevel,const gl::Offset & destOffset,const gl::Extents & glExtents,const VkImageType imageType)5959 void ImageHelper::stageSubresourceUpdateFromImage(RefCounted<ImageHelper> *image,
5960                                                   const gl::ImageIndex &index,
5961                                                   LevelIndex srcMipLevel,
5962                                                   const gl::Offset &destOffset,
5963                                                   const gl::Extents &glExtents,
5964                                                   const VkImageType imageType)
5965 {
5966     gl::LevelIndex updateLevelGL(index.getLevelIndex());
5967 
5968     VkImageCopy copyToImage               = {};
5969     copyToImage.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
5970     copyToImage.srcSubresource.mipLevel   = srcMipLevel.get();
5971     copyToImage.srcSubresource.layerCount = index.getLayerCount();
5972     copyToImage.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
5973     copyToImage.dstSubresource.mipLevel   = updateLevelGL.get();
5974 
5975     if (imageType == VK_IMAGE_TYPE_3D)
5976     {
5977         // These values must be set explicitly to follow the Vulkan spec:
5978         // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/VkImageCopy.html
5979         // If either of the calling command's srcImage or dstImage parameters are of VkImageType
5980         // VK_IMAGE_TYPE_3D, the baseArrayLayer and layerCount members of the corresponding
5981         // subresource must be 0 and 1, respectively
5982         copyToImage.dstSubresource.baseArrayLayer = 0;
5983         copyToImage.dstSubresource.layerCount     = 1;
5984         // Preserve the assumption that destOffset.z == "dstSubresource.baseArrayLayer"
5985         ASSERT(destOffset.z == (index.hasLayer() ? index.getLayerIndex() : 0));
5986     }
5987     else
5988     {
5989         copyToImage.dstSubresource.baseArrayLayer = index.hasLayer() ? index.getLayerIndex() : 0;
5990         copyToImage.dstSubresource.layerCount     = index.getLayerCount();
5991     }
5992 
5993     gl_vk::GetOffset(destOffset, &copyToImage.dstOffset);
5994     gl_vk::GetExtent(glExtents, &copyToImage.extent);
5995 
5996     appendSubresourceUpdate(updateLevelGL, SubresourceUpdate(image, copyToImage));
5997 }
5998 
stageSubresourceUpdatesFromAllImageLevels(RefCounted<ImageHelper> * image,gl::LevelIndex baseLevel)5999 void ImageHelper::stageSubresourceUpdatesFromAllImageLevels(RefCounted<ImageHelper> *image,
6000                                                             gl::LevelIndex baseLevel)
6001 {
6002     for (LevelIndex levelVk(0); levelVk < LevelIndex(image->get().getLevelCount()); ++levelVk)
6003     {
6004         const gl::LevelIndex levelGL = vk_gl::GetLevelIndex(levelVk, baseLevel);
6005         const gl::ImageIndex index =
6006             gl::ImageIndex::Make2DArrayRange(levelGL.get(), 0, image->get().getLayerCount());
6007 
6008         stageSubresourceUpdateFromImage(image, index, levelVk, gl::kOffsetZero,
6009                                         image->get().getLevelExtents(levelVk),
6010                                         image->get().getType());
6011     }
6012 }
6013 
stageClear(const gl::ImageIndex & index,VkImageAspectFlags aspectFlags,const VkClearValue & clearValue)6014 void ImageHelper::stageClear(const gl::ImageIndex &index,
6015                              VkImageAspectFlags aspectFlags,
6016                              const VkClearValue &clearValue)
6017 {
6018     gl::LevelIndex updateLevelGL(index.getLevelIndex());
6019     appendSubresourceUpdate(updateLevelGL, SubresourceUpdate(aspectFlags, clearValue, index));
6020 }
6021 
stageRobustResourceClear(const gl::ImageIndex & index)6022 void ImageHelper::stageRobustResourceClear(const gl::ImageIndex &index)
6023 {
6024     const VkImageAspectFlags aspectFlags = getAspectFlags();
6025 
6026     ASSERT(mFormat);
6027     VkClearValue clearValue = GetRobustResourceClearValue(*mFormat);
6028 
6029     gl::LevelIndex updateLevelGL(index.getLevelIndex());
6030     appendSubresourceUpdate(updateLevelGL, SubresourceUpdate(aspectFlags, clearValue, index));
6031 }
6032 
stageRobustResourceClearWithFormat(ContextVk * contextVk,const gl::ImageIndex & index,const gl::Extents & glExtents,const Format & format)6033 angle::Result ImageHelper::stageRobustResourceClearWithFormat(ContextVk *contextVk,
6034                                                               const gl::ImageIndex &index,
6035                                                               const gl::Extents &glExtents,
6036                                                               const Format &format)
6037 {
6038     const angle::Format &imageFormat     = format.actualImageFormat();
6039     const VkImageAspectFlags aspectFlags = GetFormatAspectFlags(imageFormat);
6040 
6041     // Robust clears must only be staged if we do not have any prior data for this subresource.
6042     ASSERT(!hasStagedUpdatesForSubresource(gl::LevelIndex(index.getLevelIndex()),
6043                                            index.getLayerIndex(), index.getLayerCount()));
6044 
6045     VkClearValue clearValue = GetRobustResourceClearValue(format);
6046 
6047     gl::LevelIndex updateLevelGL(index.getLevelIndex());
6048 
6049     if (imageFormat.isBlock)
6050     {
6051         // This only supports doing an initial clear to 0, not clearing to a specific encoded RGBA
6052         // value
6053         ASSERT((clearValue.color.int32[0] == 0) && (clearValue.color.int32[1] == 0) &&
6054                (clearValue.color.int32[2] == 0) && (clearValue.color.int32[3] == 0));
6055 
6056         const gl::InternalFormat &formatInfo =
6057             gl::GetSizedInternalFormatInfo(imageFormat.glInternalFormat);
6058         GLuint totalSize;
6059         ANGLE_VK_CHECK_MATH(contextVk,
6060                             formatInfo.computeCompressedImageSize(glExtents, &totalSize));
6061 
6062         VkBuffer bufferHandle      = VK_NULL_HANDLE;
6063         uint8_t *stagingPointer    = nullptr;
6064         VkDeviceSize stagingOffset = 0;
6065         ANGLE_TRY(mStagingBuffer.allocate(contextVk, totalSize, &stagingPointer, &bufferHandle,
6066                                           &stagingOffset, nullptr));
6067         memset(stagingPointer, 0, totalSize);
6068 
6069         VkBufferImageCopy copyRegion               = {};
6070         copyRegion.imageExtent.width               = glExtents.width;
6071         copyRegion.imageExtent.height              = glExtents.height;
6072         copyRegion.imageExtent.depth               = glExtents.depth;
6073         copyRegion.imageSubresource.mipLevel       = updateLevelGL.get();
6074         copyRegion.imageSubresource.aspectMask     = aspectFlags;
6075         copyRegion.imageSubresource.baseArrayLayer = index.hasLayer() ? index.getLayerIndex() : 0;
6076         copyRegion.imageSubresource.layerCount     = index.getLayerCount();
6077 
6078         appendSubresourceUpdate(updateLevelGL,
6079                                 SubresourceUpdate(mStagingBuffer.getCurrentBuffer(), copyRegion));
6080     }
6081     else
6082     {
6083         appendSubresourceUpdate(updateLevelGL, SubresourceUpdate(aspectFlags, clearValue, index));
6084     }
6085 
6086     return angle::Result::Continue;
6087 }
6088 
stageClearIfEmulatedFormat(bool isRobustResourceInitEnabled)6089 void ImageHelper::stageClearIfEmulatedFormat(bool isRobustResourceInitEnabled)
6090 {
6091     // Skip staging extra clears if robust resource init is enabled.
6092     if (!mFormat->hasEmulatedImageChannels() || isRobustResourceInitEnabled)
6093     {
6094         return;
6095     }
6096 
6097     VkClearValue clearValue = {};
6098     if (mFormat->intendedFormat().hasDepthOrStencilBits())
6099     {
6100         clearValue.depthStencil = kRobustInitDepthStencilValue;
6101     }
6102     else
6103     {
6104         clearValue.color = kEmulatedInitColorValue;
6105     }
6106 
6107     const VkImageAspectFlags aspectFlags = getAspectFlags();
6108 
6109     // If the image has an emulated channel and robust resource init is not enabled, always clear
6110     // it. These channels will be masked out in future writes, and shouldn't contain uninitialized
6111     // values.
6112     for (LevelIndex level(0); level < LevelIndex(mLevelCount); ++level)
6113     {
6114         gl::LevelIndex updateLevelGL = toGLLevel(level);
6115         gl::ImageIndex index =
6116             gl::ImageIndex::Make2DArrayRange(updateLevelGL.get(), 0, mLayerCount);
6117         prependSubresourceUpdate(updateLevelGL, SubresourceUpdate(aspectFlags, clearValue, index));
6118     }
6119 }
6120 
// Moves this image's Vulkan resources into a temporary ImageHelper and stages per-level copies
// from that temporary back into this (now reset) image.  This lets the caller reallocate the
// VkImage (e.g. on incompatible redefinition) without losing existing contents.  Levels set in
// |skipLevelsMask| are not restaged.
void ImageHelper::stageSelfAsSubresourceUpdates(ContextVk *contextVk,
                                                uint32_t levelCount,
                                                gl::TexLevelMask skipLevelsMask)

{
    // Nothing to do if every level must be skipped
    if ((~skipLevelsMask & gl::TexLevelMask(angle::BitMask<uint32_t>(levelCount))).none())
    {
        return;
    }

    // Because we are cloning this object to another object, we must finalize the layout if it is
    // being used by current renderpass as attachment. Otherwise we are copying the incorrect layout
    // since it is determined at endRenderPass time.
    contextVk->finalizeImageLayout(this);

    std::unique_ptr<RefCounted<ImageHelper>> prevImage =
        std::make_unique<RefCounted<ImageHelper>>();

    // Move the necessary information for staged update to work, and keep the rest as part of this
    // object.

    // Vulkan objects
    prevImage->get().mImage        = std::move(mImage);
    prevImage->get().mDeviceMemory = std::move(mDeviceMemory);

    // Barrier information.  Note: mLevelCount is set to levelCount so that only the necessary
    // levels are transitioned when flushing the update.
    prevImage->get().mFormat                      = mFormat;
    prevImage->get().mCurrentLayout               = mCurrentLayout;
    prevImage->get().mCurrentQueueFamilyIndex     = mCurrentQueueFamilyIndex;
    prevImage->get().mLastNonShaderReadOnlyLayout = mLastNonShaderReadOnlyLayout;
    prevImage->get().mCurrentShaderReadStageMask  = mCurrentShaderReadStageMask;
    prevImage->get().mLevelCount                  = levelCount;
    prevImage->get().mLayerCount                  = mLayerCount;
    prevImage->get().mImageSerial                 = mImageSerial;

    // Reset information for current (invalid) image.  The VkImage handle is gone; the caller is
    // expected to (re)initialize this object before the staged updates are flushed.
    mCurrentLayout               = ImageLayout::Undefined;
    mCurrentQueueFamilyIndex     = std::numeric_limits<uint32_t>::max();
    mLastNonShaderReadOnlyLayout = ImageLayout::Undefined;
    mCurrentShaderReadStageMask  = 0;
    mImageSerial                 = kInvalidImageSerial;

    setEntireContentUndefined();

    // Stage updates from the previous image.
    for (LevelIndex levelVk(0); levelVk < LevelIndex(levelCount); ++levelVk)
    {
        if (skipLevelsMask.test(levelVk.get()))
        {
            continue;
        }

        // Each staged update covers the whole level (all layers) at its original extents.
        const gl::ImageIndex index =
            gl::ImageIndex::Make2DArrayRange(toGLLevel(levelVk).get(), 0, mLayerCount);

        stageSubresourceUpdateFromImage(prevImage.get(), index, levelVk, gl::kOffsetZero,
                                        getLevelExtents(levelVk), mImageType);
    }

    ASSERT(levelCount > 0);
    // Intentionally relinquish the unique_ptr without destroying the object: each staged update
    // created above holds a reference on the RefCounted wrapper, and the wrapper (and the old
    // VkImage it owns) is destroyed when the last such update is released.
    prevImage.release();
}
6185 
// Flushes staged updates for a single level and layer range.  If |deferredClears| is non-null
// and the subresource's pending updates amount to a single full clear, the clear value is stored
// in |deferredClears| at |deferredClearIndex| (to be applied later, e.g. as a render pass loadOp)
// instead of being recorded immediately; otherwise falls through to a normal flush.
angle::Result ImageHelper::flushSingleSubresourceStagedUpdates(ContextVk *contextVk,
                                                               gl::LevelIndex levelGL,
                                                               uint32_t layer,
                                                               uint32_t layerCount,
                                                               ClearValuesArray *deferredClears,
                                                               uint32_t deferredClearIndex)
{
    std::vector<SubresourceUpdate> *levelUpdates = getLevelUpdates(levelGL);
    if (levelUpdates == nullptr || levelUpdates->empty())
    {
        return angle::Result::Continue;
    }

    LevelIndex levelVk = toVkLevel(levelGL);

    // Handle deferred clears. Search the updates list for a matching clear index.
    if (deferredClears)
    {
        Optional<size_t> foundClear;

        for (size_t updateIndex = 0; updateIndex < levelUpdates->size(); ++updateIndex)
        {
            SubresourceUpdate &update = (*levelUpdates)[updateIndex];

            if (update.isUpdateToLayers(layer, layerCount))
            {
                // On any data update, exit out. We'll need to do a full upload.
                const bool isClear              = update.updateSource == UpdateSource::Clear;
                const uint32_t updateLayerCount = isClear ? update.data.clear.layerCount : 0;
                // For 3D images the "layer" count of a level is its depth.
                const uint32_t imageLayerCount =
                    mImageType == VK_IMAGE_TYPE_3D ? getLevelExtents(levelVk).depth : mLayerCount;

                // Only a clear covering exactly the requested layers (or VK_REMAINING_ARRAY_LAYERS
                // when that spans the whole image) can be deferred; anything else forces a flush.
                if (!isClear || (updateLayerCount != layerCount &&
                                 !(update.data.clear.layerCount == VK_REMAINING_ARRAY_LAYERS &&
                                   imageLayerCount == layerCount)))
                {
                    foundClear.reset();
                    break;
                }

                // Otherwise track the latest clear update index.
                foundClear = updateIndex;
            }
        }

        // If we have a valid index we defer the clear using the clear reference.
        if (foundClear.valid())
        {
            size_t foundIndex         = foundClear.value();
            const ClearUpdate &update = (*levelUpdates)[foundIndex].data.clear;

            // Note that this set command handles combined or separate depth/stencil clears.
            deferredClears->store(deferredClearIndex, update.aspectFlags, update.value);

            // Do not call onWrite as it removes mCurrentSingleClearValue, but instead call
            // setContentDefined directly.
            setContentDefined(toVkLevel(levelGL), 1, layer, layerCount, update.aspectFlags);

            // We process the updates again to erase any clears for this level.
            removeSingleSubresourceStagedUpdates(contextVk, levelGL, layer, layerCount);
            return angle::Result::Continue;
        }

        // Otherwise we proceed with a normal update.
    }

    return flushStagedUpdates(contextVk, levelGL, levelGL + 1, layer, layer + layerCount, {});
}
6254 
// Applies all staged subresource updates (clears, buffer-to-image copies, image-to-image copies)
// that fall within [levelGLStart, levelGLEnd) x [layerStart, layerEnd) to the VkImage, recording
// transfer commands and the barriers they require.  Updates outside the requested ranges, or in
// levels listed in |skipLevelsMask|, are kept staged for a later flush.
angle::Result ImageHelper::flushStagedUpdates(ContextVk *contextVk,
                                              gl::LevelIndex levelGLStart,
                                              gl::LevelIndex levelGLEnd,
                                              uint32_t layerStart,
                                              uint32_t layerEnd,
                                              gl::TexLevelMask skipLevelsMask)
{
    if (!hasStagedUpdatesInLevels(levelGLStart, levelGLEnd))
    {
        return angle::Result::Continue;
    }

    removeSupersededUpdates(contextVk, skipLevelsMask);

    // If a clear is requested and we know it was previously cleared with the same value, we drop
    // the clear.
    if (mCurrentSingleClearValue.valid())
    {
        std::vector<SubresourceUpdate> *levelUpdates =
            getLevelUpdates(gl::LevelIndex(mCurrentSingleClearValue.value().levelIndex));
        if (levelUpdates && levelUpdates->size() == 1)
        {
            SubresourceUpdate &update = (*levelUpdates)[0];
            if (update.updateSource == UpdateSource::Clear &&
                mCurrentSingleClearValue.value() == update.data.clear)
            {
                ANGLE_PERF_WARNING(contextVk->getDebug(), GL_DEBUG_SEVERITY_LOW,
                                   "Repeated Clear on framebuffer attachment dropped");
                update.release(contextVk->getRenderer());
                levelUpdates->clear();
                return angle::Result::Continue;
            }
        }
    }

    ASSERT(validateSubresourceUpdateImageRefsConsistent());

    // Make sure staging buffer writes are visible before the copies below are recorded.
    ANGLE_TRY(mStagingBuffer.flush(contextVk));

    const VkImageAspectFlags aspectFlags = GetFormatAspectFlags(mFormat->actualImageFormat());

    // For each level, upload layers that don't conflict in parallel.  The layer is hashed to
    // `layer % 64` and used to track whether that subresource is currently in transfer.  If so, a
    // barrier is inserted.  If mLayerCount > 64, there will be a few unnecessary barriers.
    //
    // Note: when a barrier is necessary when uploading updates to a level, we could instead move to
    // the next level and continue uploads in parallel.  Once all levels need a barrier, a single
    // barrier can be issued and we could continue with the rest of the updates from the first
    // level.
    constexpr uint32_t kMaxParallelSubresourceUpload = 64;

    // Start in TransferDst.  Don't yet mark any subresource as having defined contents; that is
    // done with fine granularity as updates are applied.  This is achieved by specifying a layer
    // that is outside the tracking range.
    CommandBufferAccess access;
    access.onImageTransferWrite(levelGLStart, 1, kMaxContentDefinedLayerCount, 0, aspectFlags,
                                this);

    CommandBuffer *commandBuffer;
    ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(access, &commandBuffer));

    for (gl::LevelIndex updateMipLevelGL = levelGLStart; updateMipLevelGL < levelGLEnd;
         ++updateMipLevelGL)
    {
        std::vector<SubresourceUpdate> *levelUpdates = getLevelUpdates(updateMipLevelGL);
        if (levelUpdates == nullptr)
        {
            // The update vector is never sparse: running past its end means no higher level has
            // staged updates either.
            ASSERT(static_cast<size_t>(updateMipLevelGL.get()) >= mSubresourceUpdates.size());
            break;
        }

        std::vector<SubresourceUpdate> updatesToKeep;

        // Hash map of uploads in progress.  See comment on kMaxParallelSubresourceUpload.
        uint64_t subresourceUploadsInProgress = 0;

        for (SubresourceUpdate &update : *levelUpdates)
        {
            ASSERT(update.updateSource == UpdateSource::Clear ||
                   (update.updateSource == UpdateSource::Buffer &&
                    update.data.buffer.bufferHelper != nullptr) ||
                   (update.updateSource == UpdateSource::Image && update.image != nullptr &&
                    update.image->isReferenced() && update.image->get().valid()));

            uint32_t updateBaseLayer, updateLayerCount;
            update.getDestSubresource(mLayerCount, &updateBaseLayer, &updateLayerCount);

            // If the update layers don't intersect the requested layers, skip the update.
            const bool areUpdateLayersOutsideRange =
                updateBaseLayer + updateLayerCount <= layerStart || updateBaseLayer >= layerEnd;

            const LevelIndex updateMipLevelVk = toVkLevel(updateMipLevelGL);

            // Additionally, if updates to this level are specifically asked to be skipped, skip
            // them. This can happen when recreating an image that has been partially incompatibly
            // redefined, in which case only updates to the levels that haven't been redefined
            // should be flushed.
            if (areUpdateLayersOutsideRange || skipLevelsMask.test(updateMipLevelVk.get()))
            {
                updatesToKeep.emplace_back(std::move(update));
                continue;
            }

            // The updates were holding gl::LevelIndex values so that they would not need
            // modification when the base level of the texture changes.  Now that the update is
            // about to take effect, we need to change miplevel to LevelIndex.
            if (update.updateSource == UpdateSource::Clear)
            {
                update.data.clear.levelIndex = updateMipLevelVk.get();
            }
            else if (update.updateSource == UpdateSource::Buffer)
            {
                update.data.buffer.copyRegion.imageSubresource.mipLevel = updateMipLevelVk.get();
            }
            else if (update.updateSource == UpdateSource::Image)
            {
                update.data.image.copyRegion.dstSubresource.mipLevel = updateMipLevelVk.get();
            }

            if (updateLayerCount >= kMaxParallelSubresourceUpload)
            {
                // If there are more subresources than bits we can track, always insert a barrier.
                recordWriteBarrier(contextVk, aspectFlags, ImageLayout::TransferDst, commandBuffer);
                subresourceUploadsInProgress = std::numeric_limits<uint64_t>::max();
            }
            else
            {
                // Build a bit mask covering this update's layers (rotated by the base layer) and
                // test it against uploads already in flight for this level.
                const uint64_t subresourceHashRange = angle::BitMask<uint64_t>(updateLayerCount);
                const uint32_t subresourceHashOffset =
                    updateBaseLayer % kMaxParallelSubresourceUpload;
                const uint64_t subresourceHash =
                    ANGLE_ROTL64(subresourceHashRange, subresourceHashOffset);

                if ((subresourceUploadsInProgress & subresourceHash) != 0)
                {
                    // If there's overlap in subresource upload, issue a barrier.
                    recordWriteBarrier(contextVk, aspectFlags, ImageLayout::TransferDst,
                                       commandBuffer);
                    subresourceUploadsInProgress = 0;
                }
                subresourceUploadsInProgress |= subresourceHash;
            }

            if (update.updateSource == UpdateSource::Clear)
            {
                clear(update.data.clear.aspectFlags, update.data.clear.value, updateMipLevelVk,
                      updateBaseLayer, updateLayerCount, commandBuffer);
                // Remember the latest operation is a clear call
                mCurrentSingleClearValue = update.data.clear;

                // Do not call onWrite as it removes mCurrentSingleClearValue, but instead call
                // setContentDefined directly.
                setContentDefined(updateMipLevelVk, 1, updateBaseLayer, updateLayerCount,
                                  update.data.clear.aspectFlags);
            }
            else if (update.updateSource == UpdateSource::Buffer)
            {
                BufferUpdate &bufferUpdate = update.data.buffer;

                BufferHelper *currentBuffer = bufferUpdate.bufferHelper;
                ASSERT(currentBuffer && currentBuffer->valid());

                // Declare the transfer-read of the source buffer; this may retrieve a different
                // command buffer than the one obtained above.
                CommandBufferAccess bufferAccess;
                bufferAccess.onBufferTransferRead(currentBuffer);
                ANGLE_TRY(
                    contextVk->getOutsideRenderPassCommandBuffer(bufferAccess, &commandBuffer));

                commandBuffer->copyBufferToImage(currentBuffer->getBuffer().getHandle(), mImage,
                                                 getCurrentLayout(), 1,
                                                 &update.data.buffer.copyRegion);
                onWrite(updateMipLevelGL, 1, updateBaseLayer, updateLayerCount,
                        update.data.buffer.copyRegion.imageSubresource.aspectMask);
            }
            else
            {
                // Declare the transfer-read of the source image; this may retrieve a different
                // command buffer than the one obtained above.
                CommandBufferAccess imageAccess;
                imageAccess.onImageTransferRead(aspectFlags, &update.image->get());
                ANGLE_TRY(
                    contextVk->getOutsideRenderPassCommandBuffer(imageAccess, &commandBuffer));

                commandBuffer->copyImage(update.image->get().getImage(),
                                         update.image->get().getCurrentLayout(), mImage,
                                         getCurrentLayout(), 1, &update.data.image.copyRegion);
                onWrite(updateMipLevelGL, 1, updateBaseLayer, updateLayerCount,
                        update.data.image.copyRegion.dstSubresource.aspectMask);
            }

            // The update has been recorded; release its staged resources (buffer/image refs).
            update.release(contextVk->getRenderer());
        }

        // Only remove the updates that were actually applied to the image.
        *levelUpdates = std::move(updatesToKeep);
    }

    // Compact mSubresourceUpdates, then check if there are any updates left.
    size_t compactSize;
    for (compactSize = mSubresourceUpdates.size(); compactSize > 0; --compactSize)
    {
        if (!mSubresourceUpdates[compactSize - 1].empty())
        {
            break;
        }
    }
    mSubresourceUpdates.resize(compactSize);

    ASSERT(validateSubresourceUpdateImageRefsConsistent());

    // If no updates left, release the staging buffers to save memory.
    if (mSubresourceUpdates.empty())
    {
        mStagingBuffer.releaseInFlightBuffers(contextVk);
        mStagingBuffer.release(contextVk->getRenderer());
    }

    return angle::Result::Continue;
}
6471 
flushAllStagedUpdates(ContextVk * contextVk)6472 angle::Result ImageHelper::flushAllStagedUpdates(ContextVk *contextVk)
6473 {
6474     return flushStagedUpdates(contextVk, mFirstAllocatedLevel, mFirstAllocatedLevel + mLevelCount,
6475                               0, mLayerCount, {});
6476 }
6477 
hasStagedUpdatesForSubresource(gl::LevelIndex levelGL,uint32_t layer,uint32_t layerCount) const6478 bool ImageHelper::hasStagedUpdatesForSubresource(gl::LevelIndex levelGL,
6479                                                  uint32_t layer,
6480                                                  uint32_t layerCount) const
6481 {
6482     // Check to see if any updates are staged for the given level and layer
6483 
6484     const std::vector<SubresourceUpdate> *levelUpdates = getLevelUpdates(levelGL);
6485     if (levelUpdates == nullptr || levelUpdates->empty())
6486     {
6487         return false;
6488     }
6489 
6490     for (const SubresourceUpdate &update : *levelUpdates)
6491     {
6492         uint32_t updateBaseLayer, updateLayerCount;
6493         update.getDestSubresource(mLayerCount, &updateBaseLayer, &updateLayerCount);
6494 
6495         const uint32_t updateLayerEnd = updateBaseLayer + updateLayerCount;
6496         const uint32_t layerEnd       = layer + layerCount;
6497 
6498         if ((layer >= updateBaseLayer && layer < updateLayerEnd) ||
6499             (layerEnd > updateBaseLayer && layerEnd <= updateLayerEnd))
6500         {
6501             // The layers intersect with the update range
6502             return true;
6503         }
6504     }
6505 
6506     return false;
6507 }
6508 
getLastAllocatedLevel() const6509 gl::LevelIndex ImageHelper::getLastAllocatedLevel() const
6510 {
6511     return mFirstAllocatedLevel + mLevelCount - 1;
6512 }
6513 
hasStagedUpdatesInAllocatedLevels() const6514 bool ImageHelper::hasStagedUpdatesInAllocatedLevels() const
6515 {
6516     return hasStagedUpdatesInLevels(mFirstAllocatedLevel, getLastAllocatedLevel() + 1);
6517 }
6518 
hasStagedUpdatesInLevels(gl::LevelIndex levelStart,gl::LevelIndex levelEnd) const6519 bool ImageHelper::hasStagedUpdatesInLevels(gl::LevelIndex levelStart, gl::LevelIndex levelEnd) const
6520 {
6521     for (gl::LevelIndex level = levelStart; level < levelEnd; ++level)
6522     {
6523         const std::vector<SubresourceUpdate> *levelUpdates = getLevelUpdates(level);
6524         if (levelUpdates == nullptr)
6525         {
6526             ASSERT(static_cast<size_t>(level.get()) >= mSubresourceUpdates.size());
6527             return false;
6528         }
6529 
6530         if (!levelUpdates->empty())
6531         {
6532             return true;
6533         }
6534     }
6535     return false;
6536 }
6537 
validateSubresourceUpdateImageRefConsistent(RefCounted<ImageHelper> * image) const6538 bool ImageHelper::validateSubresourceUpdateImageRefConsistent(RefCounted<ImageHelper> *image) const
6539 {
6540     if (image == nullptr)
6541     {
6542         return true;
6543     }
6544 
6545     uint32_t refs = 0;
6546 
6547     for (const std::vector<SubresourceUpdate> &levelUpdates : mSubresourceUpdates)
6548     {
6549         for (const SubresourceUpdate &update : levelUpdates)
6550         {
6551             if (update.updateSource == UpdateSource::Image && update.image == image)
6552             {
6553                 ++refs;
6554             }
6555         }
6556     }
6557 
6558     return image->isRefCountAsExpected(refs);
6559 }
6560 
validateSubresourceUpdateImageRefsConsistent() const6561 bool ImageHelper::validateSubresourceUpdateImageRefsConsistent() const
6562 {
6563     for (const std::vector<SubresourceUpdate> &levelUpdates : mSubresourceUpdates)
6564     {
6565         for (const SubresourceUpdate &update : levelUpdates)
6566         {
6567             if (update.updateSource == UpdateSource::Image &&
6568                 !validateSubresourceUpdateImageRefConsistent(update.image))
6569             {
6570                 return false;
6571             }
6572         }
6573     }
6574 
6575     return true;
6576 }
6577 
// Walks each level's staged updates from newest to oldest and drops any update whose written
// subresource (aspect + layers + whole-level area) is completely overwritten by later updates.
// Levels in |skipLevelsMask| are left untouched.
void ImageHelper::removeSupersededUpdates(ContextVk *contextVk, gl::TexLevelMask skipLevelsMask)
{
    if (mLayerCount > 64)
    {
        // Not implemented for images with more than 64 layers.  A 64-bit mask is used for
        // efficiency, hence the limit.
        return;
    }

    ASSERT(validateSubresourceUpdateImageRefsConsistent());

    RendererVk *renderer = contextVk->getRenderer();

    // Go over updates in reverse order, and mark the layers they completely overwrite.  If an
    // update is encountered whose layers are all already marked, that update is superseded by
    // future updates, so it can be dropped.  This tracking is done per level.  If the aspect being
    // written to is color/depth or stencil, index 0 or 1 is used respectively.  This is so
    // that if a depth write for example covers the whole subresource, a stencil write to that same
    // subresource is not dropped.
    constexpr size_t kIndexColorOrDepth = 0;
    constexpr size_t kIndexStencil      = 1;
    uint64_t supersededLayers[2]        = {};

    gl::Extents levelExtents = {};

    // Note: this lambda only needs |this|, but = is specified because clang warns about kIndex* not
    // needing capture, while MSVC fails to compile without capturing them.
    auto markLayersAndDropSuperseded = [=, &supersededLayers,
                                        &levelExtents](SubresourceUpdate &update) {
        // Layer range written by this update (clears without layer info expand to all layers).
        uint32_t updateBaseLayer, updateLayerCount;
        update.getDestSubresource(mLayerCount, &updateBaseLayer, &updateLayerCount);

        // Classify the written aspects: color and depth share one tracking slot, stencil the other.
        const VkImageAspectFlags aspectMask = update.getDestAspectFlags();
        const bool hasColorOrDepth =
            (aspectMask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT |
                           VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT |
                           VK_IMAGE_ASPECT_DEPTH_BIT)) != 0;
        const bool hasStencil = (aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) != 0;

        // Test if the update is to layers that are all superseded.  In that case, drop the update.
        ASSERT(updateLayerCount <= 64);
        // Note: shifting by >= 64 would be undefined behavior, hence the special case.
        uint64_t updateLayersMask = updateLayerCount >= 64
                                        ? ~static_cast<uint64_t>(0)
                                        : angle::BitMask<uint64_t>(updateLayerCount);
        updateLayersMask <<= updateBaseLayer;

        const bool isColorOrDepthSuperseded =
            !hasColorOrDepth ||
            (supersededLayers[kIndexColorOrDepth] & updateLayersMask) == updateLayersMask;
        const bool isStencilSuperseded =
            !hasStencil || (supersededLayers[kIndexStencil] & updateLayersMask) == updateLayersMask;

        if (isColorOrDepthSuperseded && isStencilSuperseded)
        {
            ANGLE_PERF_WARNING(contextVk->getDebug(), GL_DEBUG_SEVERITY_LOW,
                               "Dropped image update that is superseded by an overlapping one");

            // Drop any source-image reference the update holds before it is erased.
            update.release(renderer);
            return true;
        }

        // Get the area this update affects.  Note that clear updates always clear the whole
        // subresource.
        gl::Box updateBox(gl::kOffsetZero, levelExtents);

        if (update.updateSource == UpdateSource::Buffer)
        {
            updateBox = gl::Box(update.data.buffer.copyRegion.imageOffset,
                                update.data.buffer.copyRegion.imageExtent);
        }
        else if (update.updateSource == UpdateSource::Image)
        {
            updateBox = gl::Box(update.data.image.copyRegion.dstOffset,
                                update.data.image.copyRegion.extent);
        }

        // Only if the update is to the whole subresource, mark its layers.
        if (updateBox.coversSameExtent(levelExtents))
        {
            if (hasColorOrDepth)
            {
                supersededLayers[kIndexColorOrDepth] |= updateLayersMask;
            }
            if (hasStencil)
            {
                supersededLayers[kIndexStencil] |= updateLayersMask;
            }
        }

        return false;
    };

    for (LevelIndex levelVk(0); levelVk < LevelIndex(mLevelCount); ++levelVk)
    {
        gl::LevelIndex levelGL                       = toGLLevel(levelVk);
        std::vector<SubresourceUpdate> *levelUpdates = getLevelUpdates(levelGL);
        if (levelUpdates == nullptr)
        {
            // Levels beyond the end of mSubresourceUpdates have no staged updates at all.
            ASSERT(static_cast<size_t>(levelGL.get()) >= mSubresourceUpdates.size());
            break;
        }

        // If level is skipped (because incompatibly redefined), don't remove any of its updates.
        if (skipLevelsMask.test(levelVk.get()))
        {
            continue;
        }

        // Reset per-level tracking state before scanning this level's updates.
        levelExtents                         = getLevelExtents(levelVk);
        supersededLayers[kIndexColorOrDepth] = 0;
        supersededLayers[kIndexStencil]      = 0;

        // remove_if over reverse iterators visits the most recent updates first and shifts the
        // kept elements toward the back; the removed range in forward order is
        // [begin() (== rend().base()), returned.base()), which is what gets erased.
        levelUpdates->erase(levelUpdates->rend().base(),
                            std::remove_if(levelUpdates->rbegin(), levelUpdates->rend(),
                                           markLayersAndDropSuperseded)
                                .base());
    }

    ASSERT(validateSubresourceUpdateImageRefsConsistent());
}
6698 
// Copies |sourceArea| of |layerCount| layers (starting at |baseLayer|) of the given level into
// the image's staging buffer.  Outputs the buffer, total byte size, per-aspect buffer offsets
// and a pointer to the mapped allocation.  For combined depth/stencil formats, depth data is
// packed first in the buffer and stencil bytes immediately follow it.
angle::Result ImageHelper::copyImageDataToBuffer(ContextVk *contextVk,
                                                 gl::LevelIndex sourceLevelGL,
                                                 uint32_t layerCount,
                                                 uint32_t baseLayer,
                                                 const gl::Box &sourceArea,
                                                 BufferHelper **bufferOut,
                                                 size_t *bufferSize,
                                                 StagingBufferOffsetArray *bufferOffsetsOut,
                                                 uint8_t **outDataPtr)
{
    ANGLE_TRACE_EVENT0("gpu.angle", "ImageHelper::copyImageDataToBuffer");

    const angle::Format &imageFormat = mFormat->actualImageFormat();

    // Two VK formats (one depth-only, one combined depth/stencil) use an extra byte for depth.
    // From https://www.khronos.org/registry/vulkan/specs/1.1/html/vkspec.html#VkBufferImageCopy:
    //  data copied to or from the depth aspect of a VK_FORMAT_X8_D24_UNORM_PACK32 or
    //  VK_FORMAT_D24_UNORM_S8_UINT format is packed with one 32-bit word per texel...
    // So make sure if we hit the depth/stencil format that we have 5 bytes per pixel (4 for depth
    //  data, 1 for stencil). NOTE that depth-only VK_FORMAT_X8_D24_UNORM_PACK32 already has 4 bytes
    //  per pixel which is sufficient to contain its depth aspect (no stencil aspect).
    uint32_t pixelBytes         = imageFormat.pixelBytes;
    uint32_t depthBytesPerPixel = imageFormat.depthBits >> 3;
    if (mFormat->actualImageVkFormat() == VK_FORMAT_D24_UNORM_S8_UINT)
    {
        pixelBytes         = 5;
        depthBytesPerPixel = 4;
    }

    *bufferSize = sourceArea.width * sourceArea.height * sourceArea.depth * pixelBytes * layerCount;

    const VkImageAspectFlags aspectFlags = getAspectFlags();

    // Allocate staging buffer data from context
    VkBuffer bufferHandle;
    size_t alignment = mStagingBuffer.getAlignment();
    ANGLE_TRY(mStagingBuffer.allocateWithAlignment(contextVk, *bufferSize, alignment, outDataPtr,
                                                   &bufferHandle, &(*bufferOffsetsOut)[0],
                                                   nullptr));
    *bufferOut = mStagingBuffer.getCurrentBuffer();

    LevelIndex sourceLevelVk = toVkLevel(sourceLevelGL);

    VkBufferImageCopy regions[2] = {};
    // Default to non-combined DS case
    regions[0].bufferOffset                    = (*bufferOffsetsOut)[0];
    // bufferRowLength/bufferImageHeight of 0 mean tightly packed per the Vulkan spec.
    regions[0].bufferRowLength                 = 0;
    regions[0].bufferImageHeight               = 0;
    regions[0].imageExtent.width               = sourceArea.width;
    regions[0].imageExtent.height              = sourceArea.height;
    regions[0].imageExtent.depth               = sourceArea.depth;
    regions[0].imageOffset.x                   = sourceArea.x;
    regions[0].imageOffset.y                   = sourceArea.y;
    regions[0].imageOffset.z                   = sourceArea.z;
    regions[0].imageSubresource.aspectMask     = aspectFlags;
    regions[0].imageSubresource.baseArrayLayer = baseLayer;
    regions[0].imageSubresource.layerCount     = layerCount;
    regions[0].imageSubresource.mipLevel       = sourceLevelVk.get();

    if (isCombinedDepthStencilFormat())
    {
        // For combined DS image we'll copy depth and stencil aspects separately
        // Depth aspect comes first in buffer and can use most settings from above
        regions[0].imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;

        // Get depth data size since stencil data immediately follows depth data in buffer
        const VkDeviceSize depthSize = depthBytesPerPixel * sourceArea.width * sourceArea.height *
                                       sourceArea.depth * layerCount;

        // Double-check that we allocated enough buffer space (always 1 byte per stencil)
        ASSERT(*bufferSize >= (depthSize + (sourceArea.width * sourceArea.height *
                                            sourceArea.depth * layerCount)));

        // Copy stencil data into buffer immediately following the depth data
        const VkDeviceSize stencilOffset       = (*bufferOffsetsOut)[0] + depthSize;
        (*bufferOffsetsOut)[1]                 = stencilOffset;
        regions[1]                             = regions[0];
        regions[1].bufferOffset                = stencilOffset;
        regions[1].imageSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    }

    CommandBufferAccess access;
    access.onBufferTransferWrite(*bufferOut);
    access.onImageTransferRead(aspectFlags, this);

    CommandBuffer *commandBuffer;
    ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(access, &commandBuffer));

    // NOTE(review): only 1 region is recorded here even in the combined depth/stencil case
    // where regions[1] is also prepared -- confirm whether the second region is expected to
    // be consumed by the caller or whether a count of 2 was intended.
    commandBuffer->copyImageToBuffer(mImage, getCurrentLayout(), bufferHandle, 1, regions);

    return angle::Result::Continue;
}
6791 
6792 // static
GetReadPixelsParams(ContextVk * contextVk,const gl::PixelPackState & packState,gl::Buffer * packBuffer,GLenum format,GLenum type,const gl::Rectangle & area,const gl::Rectangle & clippedArea,PackPixelsParams * paramsOut,GLuint * skipBytesOut)6793 angle::Result ImageHelper::GetReadPixelsParams(ContextVk *contextVk,
6794                                                const gl::PixelPackState &packState,
6795                                                gl::Buffer *packBuffer,
6796                                                GLenum format,
6797                                                GLenum type,
6798                                                const gl::Rectangle &area,
6799                                                const gl::Rectangle &clippedArea,
6800                                                PackPixelsParams *paramsOut,
6801                                                GLuint *skipBytesOut)
6802 {
6803     const gl::InternalFormat &sizedFormatInfo = gl::GetInternalFormatInfo(format, type);
6804 
6805     GLuint outputPitch = 0;
6806     ANGLE_VK_CHECK_MATH(contextVk,
6807                         sizedFormatInfo.computeRowPitch(type, area.width, packState.alignment,
6808                                                         packState.rowLength, &outputPitch));
6809     ANGLE_VK_CHECK_MATH(contextVk, sizedFormatInfo.computeSkipBytes(type, outputPitch, 0, packState,
6810                                                                     false, skipBytesOut));
6811 
6812     *skipBytesOut += (clippedArea.x - area.x) * sizedFormatInfo.pixelBytes +
6813                      (clippedArea.y - area.y) * outputPitch;
6814 
6815     const angle::Format &angleFormat = GetFormatFromFormatType(format, type);
6816 
6817     *paramsOut = PackPixelsParams(clippedArea, angleFormat, outputPitch, packState.reverseRowOrder,
6818                                   packBuffer, 0);
6819     return angle::Result::Continue;
6820 }
6821 
// Reads back an entire level (all depth slices of a 3D texture, or the single given |layer|)
// for a glGetTexImage-style query, packing into |pixels|.  Combined depth/stencil formats are
// not supported and return early with a warning.
angle::Result ImageHelper::readPixelsForGetImage(ContextVk *contextVk,
                                                 const gl::PixelPackState &packState,
                                                 gl::Buffer *packBuffer,
                                                 gl::LevelIndex levelGL,
                                                 uint32_t layer,
                                                 GLenum format,
                                                 GLenum type,
                                                 void *pixels)
{
    const angle::Format &angleFormat = GetFormatFromFormatType(format, type);

    // Pick the single image aspect to read: color if any color/luminance channels exist,
    // otherwise depth or stencil.
    VkImageAspectFlagBits aspectFlags = {};
    if (angleFormat.redBits > 0 || angleFormat.blueBits > 0 || angleFormat.greenBits > 0 ||
        angleFormat.alphaBits > 0 || angleFormat.luminanceBits > 0)
    {
        aspectFlags = VK_IMAGE_ASPECT_COLOR_BIT;
    }
    else
    {
        if (angleFormat.depthBits > 0)
        {
            if (angleFormat.stencilBits != 0)
            {
                // TODO (anglebug.com/4688) Support combined depth stencil for GetTexImage
                WARN() << "Unable to pull combined depth/stencil for GetTexImage";
                return angle::Result::Continue;
            }
            aspectFlags = VK_IMAGE_ASPECT_DEPTH_BIT;
        }
        if (angleFormat.stencilBits > 0)
        {
            aspectFlags = VK_IMAGE_ASPECT_STENCIL_BIT;
        }
    }

    ASSERT(aspectFlags != 0);

    PackPixelsParams params;
    GLuint outputSkipBytes = 0;

    // Read the full level extent; no clipping for GetTexImage.
    const LevelIndex levelVk     = toVkLevel(levelGL);
    const gl::Extents mipExtents = getLevelExtents(levelVk);
    gl::Rectangle area(0, 0, mipExtents.width, mipExtents.height);

    ANGLE_TRY(GetReadPixelsParams(contextVk, packState, packBuffer, format, type, area, area,
                                  &params, &outputSkipBytes));

    // Use a temporary staging buffer. Could be optimized.
    RendererScoped<DynamicBuffer> stagingBuffer(contextVk->getRenderer());
    stagingBuffer.get().init(contextVk->getRenderer(), VK_BUFFER_USAGE_TRANSFER_DST_BIT, 1,
                             kStagingBufferSize, true, DynamicBufferPolicy::OneShotUse);

    if (mExtents.depth > 1)
    {
        // Depth > 1 means this is a 3D texture and we need to copy all layers
        // Note: the |layer| parameter is reused as the slice index here; readPixels interprets
        // it as the z offset for 3D images.
        for (layer = 0; layer < static_cast<uint32_t>(mipExtents.depth); layer++)
        {
            ANGLE_TRY(readPixels(contextVk, area, params, aspectFlags, levelGL, layer,
                                 static_cast<uint8_t *>(pixels) + outputSkipBytes,
                                 &stagingBuffer.get()));

            // Advance the destination pointer by one tightly-packed slice.
            outputSkipBytes += mipExtents.width * mipExtents.height *
                               gl::GetInternalFormatInfo(format, type).pixelBytes;
        }
    }
    else
    {
        ANGLE_TRY(readPixels(contextVk, area, params, aspectFlags, levelGL, layer,
                             static_cast<uint8_t *>(pixels) + outputSkipBytes,
                             &stagingBuffer.get()));
    }

    return angle::Result::Continue;
}
6896 
// Reads back |area| of a single subresource.  Multisampled sources are first resolved into a
// temporary single-sampled image.  If a pack buffer is bound and the formats allow it, the copy
// is done entirely on the GPU; otherwise the data is copied into |stagingBuffer|, the context is
// finished (full GPU-CPU sync), and the result is packed into |pixels| (or the mapped PBO).
angle::Result ImageHelper::readPixels(ContextVk *contextVk,
                                      const gl::Rectangle &area,
                                      const PackPixelsParams &packPixelsParams,
                                      VkImageAspectFlagBits copyAspectFlags,
                                      gl::LevelIndex levelGL,
                                      uint32_t layer,
                                      void *pixels,
                                      DynamicBuffer *stagingBuffer)
{
    ANGLE_TRACE_EVENT0("gpu.angle", "ImageHelper::readPixels");

    RendererVk *renderer = contextVk->getRenderer();

    // If the source image is multisampled, we need to resolve it into a temporary image before
    // performing a readback.
    bool isMultisampled = mSamples > 1;
    RendererScoped<ImageHelper> resolvedImage(contextVk->getRenderer());

    // |src| is either |this| or, after resolve, the temporary single-sampled image.
    ImageHelper *src = this;

    // Staged updates for this subresource must have been flushed by the caller.
    ASSERT(!hasStagedUpdatesForSubresource(levelGL, layer, 1));

    if (isMultisampled)
    {
        ANGLE_TRY(resolvedImage.get().init2DStaging(
            contextVk, contextVk->hasProtectedContent(), renderer->getMemoryProperties(),
            gl::Extents(area.width, area.height, 1), *mFormat,
            VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, 1));
        resolvedImage.get().retain(&contextVk->getResourceUseList());
    }

    VkImageAspectFlags layoutChangeAspectFlags = src->getAspectFlags();

    // Note that although we're reading from the image, we need to update the layout below.
    CommandBufferAccess access;
    access.onImageTransferRead(layoutChangeAspectFlags, this);
    if (isMultisampled)
    {
        access.onImageTransferWrite(gl::LevelIndex(0), 1, 0, 1, layoutChangeAspectFlags,
                                    &resolvedImage.get());
    }

    CommandBuffer *commandBuffer;
    ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(access, &commandBuffer));

    const angle::Format *readFormat = &mFormat->actualImageFormat();

    // Depth/stencil reads use the per-aspect buffer format rather than the combined one.
    if (copyAspectFlags != VK_IMAGE_ASPECT_COLOR_BIT)
    {
        readFormat = &GetDepthStencilImageToBufferFormat(*readFormat, copyAspectFlags);
    }

    VkOffset3D srcOffset = {area.x, area.y, 0};

    VkImageSubresourceLayers srcSubresource = {};
    srcSubresource.aspectMask               = copyAspectFlags;
    srcSubresource.mipLevel                 = toVkLevel(levelGL).get();
    srcSubresource.baseArrayLayer           = layer;
    srcSubresource.layerCount               = 1;

    VkExtent3D srcExtent = {static_cast<uint32_t>(area.width), static_cast<uint32_t>(area.height),
                            1};

    if (mExtents.depth > 1)
    {
        // Depth > 1 means this is a 3D texture and we need special handling
        // For 3D images |layer| selects the z slice, not an array layer.
        srcOffset.z                   = layer;
        srcSubresource.baseArrayLayer = 0;
    }

    if (isMultisampled)
    {
        // Note: resolve only works on color images (not depth/stencil).
        ASSERT(copyAspectFlags == VK_IMAGE_ASPECT_COLOR_BIT);

        VkImageResolve resolveRegion                = {};
        resolveRegion.srcSubresource                = srcSubresource;
        resolveRegion.srcOffset                     = srcOffset;
        resolveRegion.dstSubresource.aspectMask     = copyAspectFlags;
        resolveRegion.dstSubresource.mipLevel       = 0;
        resolveRegion.dstSubresource.baseArrayLayer = 0;
        resolveRegion.dstSubresource.layerCount     = 1;
        resolveRegion.dstOffset                     = {};
        resolveRegion.extent                        = srcExtent;

        resolve(&resolvedImage.get(), resolveRegion, commandBuffer);

        CommandBufferAccess readAccess;
        readAccess.onImageTransferRead(layoutChangeAspectFlags, &resolvedImage.get());
        ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(readAccess, &commandBuffer));

        // Make the resolved image the target of buffer copy.
        src                           = &resolvedImage.get();
        srcOffset                     = {0, 0, 0};
        srcSubresource.baseArrayLayer = 0;
        srcSubresource.layerCount     = 1;
        srcSubresource.mipLevel       = 0;
    }

    // If PBO and if possible, copy directly on the GPU.
    if (packPixelsParams.packBuffer &&
        CanCopyWithTransformForReadPixels(packPixelsParams, mFormat, readFormat))
    {
        VkDeviceSize packBufferOffset = 0;
        BufferHelper &packBuffer =
            GetImpl(packPixelsParams.packBuffer)->getBufferAndOffset(&packBufferOffset);

        CommandBufferAccess copyAccess;
        copyAccess.onBufferTransferWrite(&packBuffer);
        copyAccess.onImageTransferRead(copyAspectFlags, src);

        CommandBuffer *copyCommandBuffer;
        ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(copyAccess, &copyCommandBuffer));

        ASSERT(packPixelsParams.outputPitch % readFormat->pixelBytes == 0);

        VkBufferImageCopy region = {};
        region.bufferImageHeight = srcExtent.height;
        // With a bound PBO, |pixels| is a byte offset into the buffer, not a host pointer.
        region.bufferOffset =
            packBufferOffset + packPixelsParams.offset + reinterpret_cast<ptrdiff_t>(pixels);
        region.bufferRowLength  = packPixelsParams.outputPitch / readFormat->pixelBytes;
        region.imageExtent      = srcExtent;
        region.imageOffset      = srcOffset;
        region.imageSubresource = srcSubresource;

        copyCommandBuffer->copyImageToBuffer(src->getImage(), src->getCurrentLayout(),
                                             packBuffer.getBuffer().getHandle(), 1, &region);
        return angle::Result::Continue;
    }

    // Slow path: copy to the staging buffer, stall, then pack on the CPU.
    VkBuffer bufferHandle      = VK_NULL_HANDLE;
    uint8_t *readPixelBuffer   = nullptr;
    VkDeviceSize stagingOffset = 0;
    size_t allocationSize      = readFormat->pixelBytes * area.width * area.height;

    ANGLE_TRY(stagingBuffer->allocate(contextVk, allocationSize, &readPixelBuffer, &bufferHandle,
                                      &stagingOffset, nullptr));

    VkBufferImageCopy region = {};
    region.bufferImageHeight = srcExtent.height;
    region.bufferOffset      = stagingOffset;
    region.bufferRowLength   = srcExtent.width;
    region.imageExtent       = srcExtent;
    region.imageOffset       = srcOffset;
    region.imageSubresource  = srcSubresource;

    CommandBufferAccess readbackAccess;
    readbackAccess.onBufferTransferWrite(stagingBuffer->getCurrentBuffer());

    CommandBuffer *readbackCommandBuffer;
    ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(readbackAccess, &readbackCommandBuffer));

    readbackCommandBuffer->copyImageToBuffer(src->getImage(), src->getCurrentLayout(), bufferHandle,
                                             1, &region);

    ANGLE_PERF_WARNING(contextVk->getDebug(), GL_DEBUG_SEVERITY_HIGH,
                       "GPU stall due to ReadPixels");

    // Triggers a full finish.
    // TODO(jmadill): Don't block on asynchronous readback.
    ANGLE_TRY(contextVk->finishImpl());

    // The buffer we copied to needs to be invalidated before we read from it because its not been
    // created with the host coherent bit.
    ANGLE_TRY(stagingBuffer->invalidate(contextVk));

    if (packPixelsParams.packBuffer)
    {
        // Must map the PBO in order to read its contents (and then unmap it later)
        BufferVk *packBufferVk = GetImpl(packPixelsParams.packBuffer);
        void *mapPtr           = nullptr;
        ANGLE_TRY(packBufferVk->mapImpl(contextVk, &mapPtr));
        uint8_t *dest = static_cast<uint8_t *>(mapPtr) + reinterpret_cast<ptrdiff_t>(pixels);
        PackPixels(packPixelsParams, *readFormat, area.width * readFormat->pixelBytes,
                   readPixelBuffer, static_cast<uint8_t *>(dest));
        ANGLE_TRY(packBufferVk->unmapImpl(contextVk));
    }
    else
    {
        PackPixels(packPixelsParams, *readFormat, area.width * readFormat->pixelBytes,
                   readPixelBuffer, static_cast<uint8_t *>(pixels));
    }

    return angle::Result::Continue;
}
7082 
7083 // ImageHelper::SubresourceUpdate implementation
// Default-constructs an empty update: tagged as a Buffer update with no buffer attached.
ImageHelper::SubresourceUpdate::SubresourceUpdate()
    : updateSource(UpdateSource::Buffer), image(nullptr)
{
    data.buffer.bufferHelper = nullptr;
}
7089 
~SubresourceUpdate()7090 ImageHelper::SubresourceUpdate::~SubresourceUpdate() {}
7091 
// Stages a buffer-to-image copy.  Note: unlike the image-source overload below, no reference
// is taken on the buffer here.
ImageHelper::SubresourceUpdate::SubresourceUpdate(BufferHelper *bufferHelperIn,
                                                  const VkBufferImageCopy &copyRegionIn)
    : updateSource(UpdateSource::Buffer), image(nullptr)
{
    data.buffer.bufferHelper = bufferHelperIn;
    data.buffer.copyRegion   = copyRegionIn;
}
7099 
// Stages an image-to-image copy.  Takes a reference on the source image; release() drops it.
ImageHelper::SubresourceUpdate::SubresourceUpdate(RefCounted<ImageHelper> *imageIn,
                                                  const VkImageCopy &copyRegionIn)
    : updateSource(UpdateSource::Image), image(imageIn)
{
    // Keep the source image alive until this update is executed or dropped.
    image->addRef();
    data.image.copyRegion = copyRegionIn;
}
7107 
// Stages a clear of the given aspects of the subresource described by |imageIndex|.  When the
// index carries no layer information, the clear targets layer 0 with VK_REMAINING_ARRAY_LAYERS.
ImageHelper::SubresourceUpdate::SubresourceUpdate(VkImageAspectFlags aspectFlags,
                                                  const VkClearValue &clearValue,
                                                  const gl::ImageIndex &imageIndex)
    : updateSource(UpdateSource::Clear), image(nullptr)
{
    data.clear.aspectFlags = aspectFlags;
    data.clear.value       = clearValue;
    data.clear.levelIndex  = imageIndex.getLevelIndex();
    data.clear.layerIndex  = imageIndex.hasLayer() ? imageIndex.getLayerIndex() : 0;
    data.clear.layerCount =
        imageIndex.hasLayer() ? imageIndex.getLayerCount() : VK_REMAINING_ARRAY_LAYERS;
}
7120 
SubresourceUpdate(SubresourceUpdate && other)7121 ImageHelper::SubresourceUpdate::SubresourceUpdate(SubresourceUpdate &&other)
7122     : updateSource(other.updateSource), image(nullptr)
7123 {
7124     switch (updateSource)
7125     {
7126         case UpdateSource::Clear:
7127             data.clear = other.data.clear;
7128             break;
7129         case UpdateSource::Buffer:
7130             data.buffer = other.data.buffer;
7131             break;
7132         case UpdateSource::Image:
7133             data.image  = other.data.image;
7134             image       = other.image;
7135             other.image = nullptr;
7136             break;
7137         default:
7138             UNREACHABLE();
7139     }
7140 }
7141 
// Move-assignment implemented as a three-way raw-memory swap of the two updates.
// NOTE(review): this relies on the update (tag + union + raw pointer) being safe to memcpy;
// no member may acquire non-trivially-copyable state.
ImageHelper::SubresourceUpdate &ImageHelper::SubresourceUpdate::operator=(SubresourceUpdate &&other)
{
    // Given that the update is a union of three structs, we can't use std::swap on the fields.  For
    // example, |this| may be an Image update and |other| may be a Buffer update.
    // The following could work:
    //
    // SubresourceUpdate oldThis;
    // Set oldThis to this->field based on updateSource
    // Set this->otherField to other.otherField based on other.updateSource
    // Set other.field to oldThis->field based on updateSource
    // std::Swap(updateSource, other.updateSource);
    //
    // It's much simpler to just swap the memory instead.

    SubresourceUpdate oldThis;
    memcpy(&oldThis, this, sizeof(*this));
    memcpy(this, &other, sizeof(*this));
    memcpy(&other, &oldThis, sizeof(*this));

    return *this;
}
7163 
release(RendererVk * renderer)7164 void ImageHelper::SubresourceUpdate::release(RendererVk *renderer)
7165 {
7166     if (updateSource == UpdateSource::Image)
7167     {
7168         image->releaseRef();
7169 
7170         if (!image->isReferenced())
7171         {
7172             // Staging images won't be used in render pass attachments.
7173             image->get().releaseImage(renderer);
7174             image->get().releaseStagingBuffer(renderer);
7175             SafeDelete(image);
7176         }
7177 
7178         image = nullptr;
7179     }
7180 }
7181 
isUpdateToLayers(uint32_t layerIndex,uint32_t layerCount) const7182 bool ImageHelper::SubresourceUpdate::isUpdateToLayers(uint32_t layerIndex,
7183                                                       uint32_t layerCount) const
7184 {
7185     uint32_t updateBaseLayer, updateLayerCount;
7186     getDestSubresource(gl::ImageIndex::kEntireLevel, &updateBaseLayer, &updateLayerCount);
7187 
7188     return updateBaseLayer == layerIndex &&
7189            (updateLayerCount == layerCount || updateLayerCount == VK_REMAINING_ARRAY_LAYERS);
7190 }
7191 
getDestSubresource(uint32_t imageLayerCount,uint32_t * baseLayerOut,uint32_t * layerCountOut) const7192 void ImageHelper::SubresourceUpdate::getDestSubresource(uint32_t imageLayerCount,
7193                                                         uint32_t *baseLayerOut,
7194                                                         uint32_t *layerCountOut) const
7195 {
7196     if (updateSource == UpdateSource::Clear)
7197     {
7198         *baseLayerOut  = data.clear.layerIndex;
7199         *layerCountOut = data.clear.layerCount;
7200 
7201         if (*layerCountOut == static_cast<uint32_t>(gl::ImageIndex::kEntireLevel))
7202         {
7203             *layerCountOut = imageLayerCount;
7204         }
7205     }
7206     else
7207     {
7208         const VkImageSubresourceLayers &dstSubresource =
7209             updateSource == UpdateSource::Buffer ? data.buffer.copyRegion.imageSubresource
7210                                                  : data.image.copyRegion.dstSubresource;
7211         *baseLayerOut  = dstSubresource.baseArrayLayer;
7212         *layerCountOut = dstSubresource.layerCount;
7213 
7214         ASSERT(*layerCountOut != static_cast<uint32_t>(gl::ImageIndex::kEntireLevel));
7215     }
7216 }
7217 
getDestAspectFlags() const7218 VkImageAspectFlags ImageHelper::SubresourceUpdate::getDestAspectFlags() const
7219 {
7220     if (updateSource == UpdateSource::Clear)
7221     {
7222         return data.clear.aspectFlags;
7223     }
7224     else if (updateSource == UpdateSource::Buffer)
7225     {
7226         return data.buffer.copyRegion.imageSubresource.aspectMask;
7227     }
7228     else
7229     {
7230         ASSERT(updateSource == UpdateSource::Image);
7231         return data.image.copyRegion.dstSubresource.aspectMask;
7232     }
7233 }
7234 
getLevelUpdates(gl::LevelIndex level)7235 std::vector<ImageHelper::SubresourceUpdate> *ImageHelper::getLevelUpdates(gl::LevelIndex level)
7236 {
7237     return static_cast<size_t>(level.get()) < mSubresourceUpdates.size()
7238                ? &mSubresourceUpdates[level.get()]
7239                : nullptr;
7240 }
7241 
getLevelUpdates(gl::LevelIndex level) const7242 const std::vector<ImageHelper::SubresourceUpdate> *ImageHelper::getLevelUpdates(
7243     gl::LevelIndex level) const
7244 {
7245     return static_cast<size_t>(level.get()) < mSubresourceUpdates.size()
7246                ? &mSubresourceUpdates[level.get()]
7247                : nullptr;
7248 }
7249 
appendSubresourceUpdate(gl::LevelIndex level,SubresourceUpdate && update)7250 void ImageHelper::appendSubresourceUpdate(gl::LevelIndex level, SubresourceUpdate &&update)
7251 {
7252     if (mSubresourceUpdates.size() <= static_cast<size_t>(level.get()))
7253     {
7254         mSubresourceUpdates.resize(level.get() + 1);
7255     }
7256 
7257     mSubresourceUpdates[level.get()].emplace_back(std::move(update));
7258     onStateChange(angle::SubjectMessage::SubjectChanged);
7259 }
7260 
prependSubresourceUpdate(gl::LevelIndex level,SubresourceUpdate && update)7261 void ImageHelper::prependSubresourceUpdate(gl::LevelIndex level, SubresourceUpdate &&update)
7262 {
7263     if (mSubresourceUpdates.size() <= static_cast<size_t>(level.get()))
7264     {
7265         mSubresourceUpdates.resize(level.get() + 1);
7266     }
7267 
7268     mSubresourceUpdates[level.get()].insert(mSubresourceUpdates[level.get()].begin(),
7269                                             std::move(update));
7270     onStateChange(angle::SubjectMessage::SubjectChanged);
7271 }
7272 
7273 // FramebufferHelper implementation.
// Default state holds no Vulkan framebuffer handle; creation happens in init().
FramebufferHelper::FramebufferHelper() = default;
7275 
// NOTE(review): presumably release() must run before destruction so the handle is
// collected as garbage rather than leaked — confirm against Framebuffer's dtor.
FramebufferHelper::~FramebufferHelper() = default;
7277 
FramebufferHelper::FramebufferHelper(FramebufferHelper &&other) : Resource(std::move(other))
{
    // Take over the other helper's framebuffer handle.
    mFramebuffer = std::move(other.mFramebuffer);
}
7282 
FramebufferHelper &FramebufferHelper::operator=(FramebufferHelper &&other)
{
    // Swap (rather than move) so whatever this object currently holds survives in
    // |other| instead of being dropped on the floor.
    std::swap(mUse, other.mUse);
    std::swap(mFramebuffer, other.mFramebuffer);
    return *this;
}
7289 
// Creates the Vulkan framebuffer described by |createInfo| on the context's device.
angle::Result FramebufferHelper::init(ContextVk *contextVk,
                                      const VkFramebufferCreateInfo &createInfo)
{
    ANGLE_VK_TRY(contextVk, mFramebuffer.init(contextVk->getDevice(), createInfo));
    return angle::Result::Continue;
}
7296 
// Hands the framebuffer to the context's garbage list for deferred destruction.
void FramebufferHelper::release(ContextVk *contextVk)
{
    contextVk->addGarbage(&mFramebuffer);
}
7301 
GetLayerMode(const vk::ImageHelper & image,uint32_t layerCount)7302 LayerMode GetLayerMode(const vk::ImageHelper &image, uint32_t layerCount)
7303 {
7304     const uint32_t imageLayerCount = GetImageLayerCountForView(image);
7305     const bool allLayers           = layerCount == imageLayerCount;
7306 
7307     ASSERT(allLayers || layerCount > 0 && layerCount <= gl::IMPLEMENTATION_MAX_TEXTURE_LEVELS);
7308     return allLayers ? LayerMode::All : static_cast<LayerMode>(layerCount);
7309 }
7310 
7311 // ImageViewHelper implementation.
// Views are created lazily; colorspace defaults to linear until initReadViewsImpl runs.
ImageViewHelper::ImageViewHelper() : mCurrentMaxLevel(0), mLinearColorspace(true) {}
7313 
// Move constructor: member-wise swap with the moved-from object after moving the
// Resource base, so |other| is left holding this object's (default) state.
ImageViewHelper::ImageViewHelper(ImageViewHelper &&other) : Resource(std::move(other))
{
    std::swap(mUse, other.mUse);

    std::swap(mCurrentMaxLevel, other.mCurrentMaxLevel);
    // Per-level read/fetch/copy views, in linear and sRGB variants.
    std::swap(mPerLevelLinearReadImageViews, other.mPerLevelLinearReadImageViews);
    std::swap(mPerLevelSRGBReadImageViews, other.mPerLevelSRGBReadImageViews);
    std::swap(mPerLevelLinearFetchImageViews, other.mPerLevelLinearFetchImageViews);
    std::swap(mPerLevelSRGBFetchImageViews, other.mPerLevelSRGBFetchImageViews);
    std::swap(mPerLevelLinearCopyImageViews, other.mPerLevelLinearCopyImageViews);
    std::swap(mPerLevelSRGBCopyImageViews, other.mPerLevelSRGBCopyImageViews);
    std::swap(mLinearColorspace, other.mLinearColorspace);

    // Stencil read, draw and storage view caches.
    std::swap(mPerLevelStencilReadImageViews, other.mPerLevelStencilReadImageViews);
    std::swap(mLayerLevelDrawImageViews, other.mLayerLevelDrawImageViews);
    std::swap(mLayerLevelDrawImageViewsLinear, other.mLayerLevelDrawImageViewsLinear);
    std::swap(mSubresourceDrawImageViews, other.mSubresourceDrawImageViews);
    std::swap(mLevelStorageImageViews, other.mLevelStorageImageViews);
    std::swap(mLayerLevelStorageImageViews, other.mLayerLevelStorageImageViews);
    std::swap(mImageViewSerial, other.mImageViewSerial);
}
7335 
~ImageViewHelper()7336 ImageViewHelper::~ImageViewHelper() {}
7337 
init(RendererVk * renderer)7338 void ImageViewHelper::init(RendererVk *renderer)
7339 {
7340     if (!mImageViewSerial.valid())
7341     {
7342         mImageViewSerial = renderer->getResourceSerialFactory().generateImageOrBufferViewSerial();
7343     }
7344 }
7345 
release(RendererVk * renderer)7346 void ImageViewHelper::release(RendererVk *renderer)
7347 {
7348     std::vector<GarbageObject> garbage;
7349 
7350     mCurrentMaxLevel = LevelIndex(0);
7351 
7352     // Release the read views
7353     ReleaseImageViews(&mPerLevelLinearReadImageViews, &garbage);
7354     ReleaseImageViews(&mPerLevelSRGBReadImageViews, &garbage);
7355     ReleaseImageViews(&mPerLevelLinearFetchImageViews, &garbage);
7356     ReleaseImageViews(&mPerLevelSRGBFetchImageViews, &garbage);
7357     ReleaseImageViews(&mPerLevelLinearCopyImageViews, &garbage);
7358     ReleaseImageViews(&mPerLevelSRGBCopyImageViews, &garbage);
7359     ReleaseImageViews(&mPerLevelStencilReadImageViews, &garbage);
7360 
7361     // Release the draw views
7362     for (ImageViewVector &layerViews : mLayerLevelDrawImageViews)
7363     {
7364         for (ImageView &imageView : layerViews)
7365         {
7366             if (imageView.valid())
7367             {
7368                 garbage.emplace_back(GetGarbage(&imageView));
7369             }
7370         }
7371     }
7372     mLayerLevelDrawImageViews.clear();
7373     for (ImageViewVector &layerViews : mLayerLevelDrawImageViewsLinear)
7374     {
7375         for (ImageView &imageView : layerViews)
7376         {
7377             if (imageView.valid())
7378             {
7379                 garbage.emplace_back(GetGarbage(&imageView));
7380             }
7381         }
7382     }
7383     mLayerLevelDrawImageViewsLinear.clear();
7384     for (auto &iter : mSubresourceDrawImageViews)
7385     {
7386         std::unique_ptr<ImageView> &imageView = iter.second;
7387         if (imageView->valid())
7388         {
7389             garbage.emplace_back(GetGarbage(imageView.get()));
7390         }
7391     }
7392     mSubresourceDrawImageViews.clear();
7393 
7394     // Release the storage views
7395     ReleaseImageViews(&mLevelStorageImageViews, &garbage);
7396     for (ImageViewVector &layerViews : mLayerLevelStorageImageViews)
7397     {
7398         for (ImageView &imageView : layerViews)
7399         {
7400             if (imageView.valid())
7401             {
7402                 garbage.emplace_back(GetGarbage(&imageView));
7403             }
7404         }
7405     }
7406     mLayerLevelStorageImageViews.clear();
7407 
7408     if (!garbage.empty())
7409     {
7410         renderer->collectGarbage(std::move(mUse), std::move(garbage));
7411 
7412         // Ensure the resource use is always valid.
7413         mUse.init();
7414     }
7415 
7416     // Update image view serial.
7417     mImageViewSerial = renderer->getResourceSerialFactory().generateImageOrBufferViewSerial();
7418 }
7419 
destroy(VkDevice device)7420 void ImageViewHelper::destroy(VkDevice device)
7421 {
7422     mCurrentMaxLevel = LevelIndex(0);
7423 
7424     // Release the read views
7425     DestroyImageViews(&mPerLevelLinearReadImageViews, device);
7426     DestroyImageViews(&mPerLevelSRGBReadImageViews, device);
7427     DestroyImageViews(&mPerLevelLinearFetchImageViews, device);
7428     DestroyImageViews(&mPerLevelSRGBFetchImageViews, device);
7429     DestroyImageViews(&mPerLevelLinearCopyImageViews, device);
7430     DestroyImageViews(&mPerLevelSRGBCopyImageViews, device);
7431     DestroyImageViews(&mPerLevelStencilReadImageViews, device);
7432 
7433     // Release the draw views
7434     for (ImageViewVector &layerViews : mLayerLevelDrawImageViews)
7435     {
7436         for (ImageView &imageView : layerViews)
7437         {
7438             imageView.destroy(device);
7439         }
7440     }
7441     mLayerLevelDrawImageViews.clear();
7442     for (ImageViewVector &layerViews : mLayerLevelDrawImageViewsLinear)
7443     {
7444         for (ImageView &imageView : layerViews)
7445         {
7446             imageView.destroy(device);
7447         }
7448     }
7449     mLayerLevelDrawImageViewsLinear.clear();
7450     for (auto &iter : mSubresourceDrawImageViews)
7451     {
7452         std::unique_ptr<ImageView> &imageView = iter.second;
7453         imageView->destroy(device);
7454     }
7455     mSubresourceDrawImageViews.clear();
7456 
7457     // Release the storage views
7458     DestroyImageViews(&mLevelStorageImageViews, device);
7459     for (ImageViewVector &layerViews : mLayerLevelStorageImageViews)
7460     {
7461         for (ImageView &imageView : layerViews)
7462         {
7463             imageView.destroy(device);
7464         }
7465     }
7466     mLayerLevelStorageImageViews.clear();
7467 
7468     mImageViewSerial = kInvalidImageOrBufferViewSerial;
7469 }
7470 
initReadViews(ContextVk * contextVk,gl::TextureType viewType,const ImageHelper & image,const Format & format,const gl::SwizzleState & formatSwizzle,const gl::SwizzleState & readSwizzle,LevelIndex baseLevel,uint32_t levelCount,uint32_t baseLayer,uint32_t layerCount,bool requiresSRGBViews,VkImageUsageFlags imageUsageFlags)7471 angle::Result ImageViewHelper::initReadViews(ContextVk *contextVk,
7472                                              gl::TextureType viewType,
7473                                              const ImageHelper &image,
7474                                              const Format &format,
7475                                              const gl::SwizzleState &formatSwizzle,
7476                                              const gl::SwizzleState &readSwizzle,
7477                                              LevelIndex baseLevel,
7478                                              uint32_t levelCount,
7479                                              uint32_t baseLayer,
7480                                              uint32_t layerCount,
7481                                              bool requiresSRGBViews,
7482                                              VkImageUsageFlags imageUsageFlags)
7483 {
7484     ASSERT(levelCount > 0);
7485     if (levelCount > mPerLevelLinearReadImageViews.size())
7486     {
7487         mPerLevelLinearReadImageViews.resize(levelCount);
7488         mPerLevelSRGBReadImageViews.resize(levelCount);
7489         mPerLevelLinearFetchImageViews.resize(levelCount);
7490         mPerLevelSRGBFetchImageViews.resize(levelCount);
7491         mPerLevelLinearCopyImageViews.resize(levelCount);
7492         mPerLevelSRGBCopyImageViews.resize(levelCount);
7493         mPerLevelStencilReadImageViews.resize(levelCount);
7494     }
7495     mCurrentMaxLevel = LevelIndex(levelCount - 1);
7496 
7497     // Determine if we already have ImageViews for the new max level
7498     if (getReadImageView().valid())
7499     {
7500         return angle::Result::Continue;
7501     }
7502 
7503     // Since we don't have a readImageView, we must create ImageViews for the new max level
7504     ANGLE_TRY(initReadViewsImpl(contextVk, viewType, image, format, formatSwizzle, readSwizzle,
7505                                 baseLevel, levelCount, baseLayer, layerCount));
7506 
7507     if (requiresSRGBViews)
7508     {
7509         ANGLE_TRY(initSRGBReadViewsImpl(contextVk, viewType, image, format, formatSwizzle,
7510                                         readSwizzle, baseLevel, levelCount, baseLayer, layerCount,
7511                                         imageUsageFlags));
7512     }
7513 
7514     return angle::Result::Continue;
7515 }
7516 
// Creates the default (non-sRGB-override) read, fetch and copy views for the
// given level/layer range.
angle::Result ImageViewHelper::initReadViewsImpl(ContextVk *contextVk,
                                                 gl::TextureType viewType,
                                                 const ImageHelper &image,
                                                 const Format &format,
                                                 const gl::SwizzleState &formatSwizzle,
                                                 const gl::SwizzleState &readSwizzle,
                                                 LevelIndex baseLevel,
                                                 uint32_t levelCount,
                                                 uint32_t baseLayer,
                                                 uint32_t layerCount)
{
    ASSERT(mImageViewSerial.valid());

    const VkImageAspectFlags aspectFlags = GetFormatAspectFlags(format.intendedFormat());
    mLinearColorspace                    = !format.actualImageFormat().isSRGB;

    if (HasBothDepthAndStencilAspects(aspectFlags))
    {
        // Combined depth/stencil formats need single-aspect views: the read view
        // covers depth, and a dedicated per-level view covers stencil.
        ANGLE_TRY(image.initLayerImageViewWithFormat(
            contextVk, viewType, format, VK_IMAGE_ASPECT_DEPTH_BIT, readSwizzle,
            &getReadImageView(), baseLevel, levelCount, baseLayer, layerCount));
        ANGLE_TRY(image.initLayerImageViewWithFormat(
            contextVk, viewType, format, VK_IMAGE_ASPECT_STENCIL_BIT, readSwizzle,
            &mPerLevelStencilReadImageViews[mCurrentMaxLevel.get()], baseLevel, levelCount,
            baseLayer, layerCount));
    }
    else
    {
        ANGLE_TRY(image.initLayerImageViewWithFormat(contextVk, viewType, format, aspectFlags,
                                                     readSwizzle, &getReadImageView(), baseLevel,
                                                     levelCount, baseLayer, layerCount));
    }

    gl::TextureType fetchType = viewType;

    // Layered view types additionally get a 2D(-array) view — presumably for
    // texelFetch-style access; confirm against callers of getFetchImageView().
    if (viewType == gl::TextureType::CubeMap || viewType == gl::TextureType::_2DArray ||
        viewType == gl::TextureType::_2DMultisampleArray)
    {
        fetchType = Get2DTextureType(layerCount, image.getSamples());

        ANGLE_TRY(image.initLayerImageViewWithFormat(contextVk, fetchType, format, aspectFlags,
                                                     readSwizzle, &getFetchImageView(), baseLevel,
                                                     levelCount, baseLayer, layerCount));
    }

    // The copy view uses the format's own swizzle rather than the read swizzle.
    ANGLE_TRY(image.initLayerImageViewWithFormat(contextVk, fetchType, format, aspectFlags,
                                                 formatSwizzle, &getCopyImageView(), baseLevel,
                                                 levelCount, baseLayer, layerCount));

    return angle::Result::Continue;
}
7568 
initSRGBReadViewsImpl(ContextVk * contextVk,gl::TextureType viewType,const ImageHelper & image,const Format & format,const gl::SwizzleState & formatSwizzle,const gl::SwizzleState & readSwizzle,LevelIndex baseLevel,uint32_t levelCount,uint32_t baseLayer,uint32_t layerCount,VkImageUsageFlags imageUsageFlags)7569 angle::Result ImageViewHelper::initSRGBReadViewsImpl(ContextVk *contextVk,
7570                                                      gl::TextureType viewType,
7571                                                      const ImageHelper &image,
7572                                                      const Format &format,
7573                                                      const gl::SwizzleState &formatSwizzle,
7574                                                      const gl::SwizzleState &readSwizzle,
7575                                                      LevelIndex baseLevel,
7576                                                      uint32_t levelCount,
7577                                                      uint32_t baseLayer,
7578                                                      uint32_t layerCount,
7579                                                      VkImageUsageFlags imageUsageFlags)
7580 {
7581     // When we select the linear/srgb counterpart formats, we must first make sure they're
7582     // actually supported by the ICD. If they are not supported by the ICD, then we treat that as if
7583     // there is no counterpart format. (In this case, the relevant extension should not be exposed)
7584     angle::FormatID srgbOverrideFormat = ConvertToSRGB(image.getFormat().actualImageFormatID);
7585     ASSERT((srgbOverrideFormat == angle::FormatID::NONE) ||
7586            (HasNonRenderableTextureFormatSupport(contextVk->getRenderer(), srgbOverrideFormat)));
7587 
7588     angle::FormatID linearOverrideFormat = ConvertToLinear(image.getFormat().actualImageFormatID);
7589     ASSERT((linearOverrideFormat == angle::FormatID::NONE) ||
7590            (HasNonRenderableTextureFormatSupport(contextVk->getRenderer(), linearOverrideFormat)));
7591 
7592     angle::FormatID linearFormat = (linearOverrideFormat != angle::FormatID::NONE)
7593                                        ? linearOverrideFormat
7594                                        : format.actualImageFormatID;
7595     ASSERT(linearFormat != angle::FormatID::NONE);
7596 
7597     const VkImageAspectFlags aspectFlags = GetFormatAspectFlags(format.intendedFormat());
7598 
7599     if (!mPerLevelLinearReadImageViews[mCurrentMaxLevel.get()].valid())
7600     {
7601         ANGLE_TRY(image.initReinterpretedLayerImageView(
7602             contextVk, viewType, aspectFlags, readSwizzle,
7603             &mPerLevelLinearReadImageViews[mCurrentMaxLevel.get()], baseLevel, levelCount,
7604             baseLayer, layerCount, imageUsageFlags, linearFormat));
7605     }
7606     if (srgbOverrideFormat != angle::FormatID::NONE &&
7607         !mPerLevelSRGBReadImageViews[mCurrentMaxLevel.get()].valid())
7608     {
7609         ANGLE_TRY(image.initReinterpretedLayerImageView(
7610             contextVk, viewType, aspectFlags, readSwizzle,
7611             &mPerLevelSRGBReadImageViews[mCurrentMaxLevel.get()], baseLevel, levelCount, baseLayer,
7612             layerCount, imageUsageFlags, srgbOverrideFormat));
7613     }
7614 
7615     gl::TextureType fetchType = viewType;
7616 
7617     if (viewType == gl::TextureType::CubeMap || viewType == gl::TextureType::_2DArray ||
7618         viewType == gl::TextureType::_2DMultisampleArray)
7619     {
7620         fetchType = Get2DTextureType(layerCount, image.getSamples());
7621 
7622         if (!mPerLevelLinearFetchImageViews[mCurrentMaxLevel.get()].valid())
7623         {
7624 
7625             ANGLE_TRY(image.initReinterpretedLayerImageView(
7626                 contextVk, fetchType, aspectFlags, readSwizzle,
7627                 &mPerLevelLinearFetchImageViews[mCurrentMaxLevel.get()], baseLevel, levelCount,
7628                 baseLayer, layerCount, imageUsageFlags, linearFormat));
7629         }
7630         if (srgbOverrideFormat != angle::FormatID::NONE &&
7631             !mPerLevelSRGBFetchImageViews[mCurrentMaxLevel.get()].valid())
7632         {
7633             ANGLE_TRY(image.initReinterpretedLayerImageView(
7634                 contextVk, fetchType, aspectFlags, readSwizzle,
7635                 &mPerLevelSRGBFetchImageViews[mCurrentMaxLevel.get()], baseLevel, levelCount,
7636                 baseLayer, layerCount, imageUsageFlags, srgbOverrideFormat));
7637         }
7638     }
7639 
7640     if (!mPerLevelLinearCopyImageViews[mCurrentMaxLevel.get()].valid())
7641     {
7642         ANGLE_TRY(image.initReinterpretedLayerImageView(
7643             contextVk, fetchType, aspectFlags, formatSwizzle,
7644             &mPerLevelLinearCopyImageViews[mCurrentMaxLevel.get()], baseLevel, levelCount,
7645             baseLayer, layerCount, imageUsageFlags, linearFormat));
7646     }
7647     if (srgbOverrideFormat != angle::FormatID::NONE &&
7648         !mPerLevelSRGBCopyImageViews[mCurrentMaxLevel.get()].valid())
7649     {
7650         ANGLE_TRY(image.initReinterpretedLayerImageView(
7651             contextVk, fetchType, aspectFlags, formatSwizzle,
7652             &mPerLevelSRGBCopyImageViews[mCurrentMaxLevel.get()], baseLevel, levelCount, baseLayer,
7653             layerCount, imageUsageFlags, srgbOverrideFormat));
7654     }
7655 
7656     return angle::Result::Continue;
7657 }
7658 
getLevelStorageImageView(ContextVk * contextVk,gl::TextureType viewType,const ImageHelper & image,LevelIndex levelVk,uint32_t layer,VkImageUsageFlags imageUsageFlags,angle::FormatID formatID,const ImageView ** imageViewOut)7659 angle::Result ImageViewHelper::getLevelStorageImageView(ContextVk *contextVk,
7660                                                         gl::TextureType viewType,
7661                                                         const ImageHelper &image,
7662                                                         LevelIndex levelVk,
7663                                                         uint32_t layer,
7664                                                         VkImageUsageFlags imageUsageFlags,
7665                                                         angle::FormatID formatID,
7666                                                         const ImageView **imageViewOut)
7667 {
7668     ASSERT(mImageViewSerial.valid());
7669 
7670     retain(&contextVk->getResourceUseList());
7671 
7672     ImageView *imageView =
7673         GetLevelImageView(&mLevelStorageImageViews, levelVk, image.getLevelCount());
7674 
7675     *imageViewOut = imageView;
7676     if (imageView->valid())
7677     {
7678         return angle::Result::Continue;
7679     }
7680 
7681     // Create the view.  Note that storage images are not affected by swizzle parameters.
7682     return image.initReinterpretedLayerImageView(contextVk, viewType, image.getAspectFlags(),
7683                                                  gl::SwizzleState(), imageView, levelVk, 1, layer,
7684                                                  image.getLayerCount(), imageUsageFlags, formatID);
7685 }
7686 
getLevelLayerStorageImageView(ContextVk * contextVk,const ImageHelper & image,LevelIndex levelVk,uint32_t layer,VkImageUsageFlags imageUsageFlags,angle::FormatID formatID,const ImageView ** imageViewOut)7687 angle::Result ImageViewHelper::getLevelLayerStorageImageView(ContextVk *contextVk,
7688                                                              const ImageHelper &image,
7689                                                              LevelIndex levelVk,
7690                                                              uint32_t layer,
7691                                                              VkImageUsageFlags imageUsageFlags,
7692                                                              angle::FormatID formatID,
7693                                                              const ImageView **imageViewOut)
7694 {
7695     ASSERT(image.valid());
7696     ASSERT(mImageViewSerial.valid());
7697     ASSERT(!image.getFormat().actualImageFormat().isBlock);
7698 
7699     retain(&contextVk->getResourceUseList());
7700 
7701     ImageView *imageView =
7702         GetLevelLayerImageView(&mLayerLevelStorageImageViews, levelVk, layer, image.getLevelCount(),
7703                                GetImageLayerCountForView(image));
7704     *imageViewOut = imageView;
7705 
7706     if (imageView->valid())
7707     {
7708         return angle::Result::Continue;
7709     }
7710 
7711     // Create the view.  Note that storage images are not affected by swizzle parameters.
7712     gl::TextureType viewType = Get2DTextureType(1, image.getSamples());
7713     return image.initReinterpretedLayerImageView(contextVk, viewType, image.getAspectFlags(),
7714                                                  gl::SwizzleState(), imageView, levelVk, 1, layer,
7715                                                  1, imageUsageFlags, formatID);
7716 }
7717 
getLevelDrawImageView(ContextVk * contextVk,const ImageHelper & image,LevelIndex levelVk,uint32_t layer,uint32_t layerCount,gl::SrgbWriteControlMode mode,const ImageView ** imageViewOut)7718 angle::Result ImageViewHelper::getLevelDrawImageView(ContextVk *contextVk,
7719                                                      const ImageHelper &image,
7720                                                      LevelIndex levelVk,
7721                                                      uint32_t layer,
7722                                                      uint32_t layerCount,
7723                                                      gl::SrgbWriteControlMode mode,
7724                                                      const ImageView **imageViewOut)
7725 {
7726     ASSERT(image.valid());
7727     ASSERT(mImageViewSerial.valid());
7728     ASSERT(!image.getFormat().actualImageFormat().isBlock);
7729 
7730     retain(&contextVk->getResourceUseList());
7731 
7732     ImageSubresourceRange range = MakeImageSubresourceDrawRange(
7733         image.toGLLevel(levelVk), layer, GetLayerMode(image, layerCount), mode);
7734 
7735     std::unique_ptr<ImageView> &view = mSubresourceDrawImageViews[range];
7736     if (view)
7737     {
7738         *imageViewOut = view.get();
7739         return angle::Result::Continue;
7740     }
7741 
7742     view          = std::make_unique<ImageView>();
7743     *imageViewOut = view.get();
7744 
7745     // Lazily allocate the image view.
7746     // Note that these views are specifically made to be used as framebuffer attachments, and
7747     // therefore don't have swizzle.
7748     gl::TextureType viewType = Get2DTextureType(layerCount, image.getSamples());
7749     return image.initLayerImageView(contextVk, viewType, image.getAspectFlags(), gl::SwizzleState(),
7750                                     view.get(), levelVk, 1, layer, layerCount, mode);
7751 }
7752 
getLevelLayerDrawImageView(ContextVk * contextVk,const ImageHelper & image,LevelIndex levelVk,uint32_t layer,gl::SrgbWriteControlMode mode,const ImageView ** imageViewOut)7753 angle::Result ImageViewHelper::getLevelLayerDrawImageView(ContextVk *contextVk,
7754                                                           const ImageHelper &image,
7755                                                           LevelIndex levelVk,
7756                                                           uint32_t layer,
7757                                                           gl::SrgbWriteControlMode mode,
7758                                                           const ImageView **imageViewOut)
7759 {
7760     ASSERT(image.valid());
7761     ASSERT(mImageViewSerial.valid());
7762     ASSERT(!image.getFormat().actualImageFormat().isBlock);
7763 
7764     retain(&contextVk->getResourceUseList());
7765 
7766     LayerLevelImageViewVector &imageViews = (mode == gl::SrgbWriteControlMode::Linear)
7767                                                 ? mLayerLevelDrawImageViewsLinear
7768                                                 : mLayerLevelDrawImageViews;
7769 
7770     // Lazily allocate the storage for image views
7771     ImageView *imageView = GetLevelLayerImageView(
7772         &imageViews, levelVk, layer, image.getLevelCount(), GetImageLayerCountForView(image));
7773     *imageViewOut = imageView;
7774 
7775     if (imageView->valid())
7776     {
7777         return angle::Result::Continue;
7778     }
7779 
7780     // Lazily allocate the image view itself.
7781     // Note that these views are specifically made to be used as framebuffer attachments, and
7782     // therefore don't have swizzle.
7783     gl::TextureType viewType = Get2DTextureType(1, image.getSamples());
7784     return image.initLayerImageView(contextVk, viewType, image.getAspectFlags(), gl::SwizzleState(),
7785                                     imageView, levelVk, 1, layer, 1, mode);
7786 }
7787 
getSubresourceSerial(gl::LevelIndex levelGL,uint32_t levelCount,uint32_t layer,LayerMode layerMode,SrgbDecodeMode srgbDecodeMode,gl::SrgbOverride srgbOverrideMode) const7788 ImageOrBufferViewSubresourceSerial ImageViewHelper::getSubresourceSerial(
7789     gl::LevelIndex levelGL,
7790     uint32_t levelCount,
7791     uint32_t layer,
7792     LayerMode layerMode,
7793     SrgbDecodeMode srgbDecodeMode,
7794     gl::SrgbOverride srgbOverrideMode) const
7795 {
7796     ASSERT(mImageViewSerial.valid());
7797 
7798     ImageOrBufferViewSubresourceSerial serial;
7799     serial.viewSerial  = mImageViewSerial;
7800     serial.subresource = MakeImageSubresourceReadRange(levelGL, levelCount, layer, layerMode,
7801                                                        srgbDecodeMode, srgbOverrideMode);
7802     return serial;
7803 }
7804 
MakeImageSubresourceReadRange(gl::LevelIndex level,uint32_t levelCount,uint32_t layer,LayerMode layerMode,SrgbDecodeMode srgbDecodeMode,gl::SrgbOverride srgbOverrideMode)7805 ImageSubresourceRange MakeImageSubresourceReadRange(gl::LevelIndex level,
7806                                                     uint32_t levelCount,
7807                                                     uint32_t layer,
7808                                                     LayerMode layerMode,
7809                                                     SrgbDecodeMode srgbDecodeMode,
7810                                                     gl::SrgbOverride srgbOverrideMode)
7811 {
7812     ImageSubresourceRange range;
7813 
7814     SetBitField(range.level, level.get());
7815     SetBitField(range.levelCount, levelCount);
7816     SetBitField(range.layer, layer);
7817     SetBitField(range.layerMode, layerMode);
7818     SetBitField(range.srgbDecodeMode, srgbDecodeMode);
7819     SetBitField(range.srgbMode, srgbOverrideMode);
7820 
7821     return range;
7822 }
7823 
MakeImageSubresourceDrawRange(gl::LevelIndex level,uint32_t layer,LayerMode layerMode,gl::SrgbWriteControlMode srgbWriteControlMode)7824 ImageSubresourceRange MakeImageSubresourceDrawRange(gl::LevelIndex level,
7825                                                     uint32_t layer,
7826                                                     LayerMode layerMode,
7827                                                     gl::SrgbWriteControlMode srgbWriteControlMode)
7828 {
7829     ImageSubresourceRange range;
7830 
7831     SetBitField(range.level, level.get());
7832     SetBitField(range.levelCount, 1);
7833     SetBitField(range.layer, layer);
7834     SetBitField(range.layerMode, layerMode);
7835     SetBitField(range.srgbDecodeMode, 0);
7836     SetBitField(range.srgbMode, srgbWriteControlMode);
7837 
7838     return range;
7839 }
7840 
// BufferViewHelper implementation.
BufferViewHelper()7842 BufferViewHelper::BufferViewHelper() : mOffset(0), mSize(0) {}
7843 
BufferViewHelper(BufferViewHelper && other)7844 BufferViewHelper::BufferViewHelper(BufferViewHelper &&other) : Resource(std::move(other))
7845 {
7846     std::swap(mOffset, other.mOffset);
7847     std::swap(mSize, other.mSize);
7848     std::swap(mViews, other.mViews);
7849     std::swap(mViewSerial, other.mViewSerial);
7850 }
7851 
~BufferViewHelper()7852 BufferViewHelper::~BufferViewHelper() {}
7853 
init(RendererVk * renderer,VkDeviceSize offset,VkDeviceSize size)7854 void BufferViewHelper::init(RendererVk *renderer, VkDeviceSize offset, VkDeviceSize size)
7855 {
7856     ASSERT(mViews.empty());
7857 
7858     mOffset = offset;
7859     mSize   = size;
7860 
7861     if (!mViewSerial.valid())
7862     {
7863         mViewSerial = renderer->getResourceSerialFactory().generateImageOrBufferViewSerial();
7864     }
7865 }
7866 
release(RendererVk * renderer)7867 void BufferViewHelper::release(RendererVk *renderer)
7868 {
7869     std::vector<GarbageObject> garbage;
7870 
7871     for (auto &formatAndView : mViews)
7872     {
7873         BufferView &view = formatAndView.second;
7874         ASSERT(view.valid());
7875 
7876         garbage.emplace_back(GetGarbage(&view));
7877     }
7878 
7879     if (!garbage.empty())
7880     {
7881         renderer->collectGarbage(std::move(mUse), std::move(garbage));
7882 
7883         // Ensure the resource use is always valid.
7884         mUse.init();
7885 
7886         // Update image view serial.
7887         mViewSerial = renderer->getResourceSerialFactory().generateImageOrBufferViewSerial();
7888     }
7889 
7890     mViews.clear();
7891 
7892     mOffset = 0;
7893     mSize   = 0;
7894 }
7895 
destroy(VkDevice device)7896 void BufferViewHelper::destroy(VkDevice device)
7897 {
7898     for (auto &formatAndView : mViews)
7899     {
7900         BufferView &view = formatAndView.second;
7901         view.destroy(device);
7902     }
7903 
7904     mViews.clear();
7905 
7906     mOffset = 0;
7907     mSize   = 0;
7908 
7909     mViewSerial = kInvalidImageOrBufferViewSerial;
7910 }
7911 
getView(ContextVk * contextVk,const BufferHelper & buffer,VkDeviceSize bufferOffset,const Format & format,const BufferView ** viewOut)7912 angle::Result BufferViewHelper::getView(ContextVk *contextVk,
7913                                         const BufferHelper &buffer,
7914                                         VkDeviceSize bufferOffset,
7915                                         const Format &format,
7916                                         const BufferView **viewOut)
7917 {
7918     ASSERT(format.valid());
7919 
7920     VkFormat viewVkFormat = format.actualBufferVkFormat(false);
7921 
7922     auto iter = mViews.find(viewVkFormat);
7923     if (iter != mViews.end())
7924     {
7925         *viewOut = &iter->second;
7926         return angle::Result::Continue;
7927     }
7928 
7929     // If the size is not a multiple of pixelBytes, remove the extra bytes.  The last element cannot
7930     // be read anyway, and this is a requirement of Vulkan (for size to be a multiple of format
7931     // texel block size).
7932     const angle::Format &bufferFormat = format.actualBufferFormat(false);
7933     const GLuint pixelBytes           = bufferFormat.pixelBytes;
7934     VkDeviceSize size                 = mSize - mSize % pixelBytes;
7935 
7936     VkBufferViewCreateInfo viewCreateInfo = {};
7937     viewCreateInfo.sType                  = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
7938     viewCreateInfo.buffer                 = buffer.getBuffer().getHandle();
7939     viewCreateInfo.format                 = viewVkFormat;
7940     viewCreateInfo.offset                 = mOffset + bufferOffset;
7941     viewCreateInfo.range                  = size;
7942 
7943     BufferView view;
7944     ANGLE_VK_TRY(contextVk, view.init(contextVk->getDevice(), viewCreateInfo));
7945 
7946     // Cache the view
7947     auto insertIter = mViews.insert({viewVkFormat, std::move(view)});
7948     *viewOut        = &insertIter.first->second;
7949     ASSERT(insertIter.second);
7950 
7951     return angle::Result::Continue;
7952 }
7953 
getSerial() const7954 ImageOrBufferViewSubresourceSerial BufferViewHelper::getSerial() const
7955 {
7956     ASSERT(mViewSerial.valid());
7957 
7958     ImageOrBufferViewSubresourceSerial serial = {};
7959     serial.viewSerial                         = mViewSerial;
7960     return serial;
7961 }
7962 
// ShaderProgramHelper implementation.
ShaderProgramHelper()7964 ShaderProgramHelper::ShaderProgramHelper() : mSpecializationConstants{} {}
7965 
7966 ShaderProgramHelper::~ShaderProgramHelper() = default;
7967 
// Returns whether a shader module has been attached for |shaderType|.
bool ShaderProgramHelper::valid(const gl::ShaderType shaderType) const
{
    return mShaders[shaderType].valid();
}
7972 
destroy(RendererVk * rendererVk)7973 void ShaderProgramHelper::destroy(RendererVk *rendererVk)
7974 {
7975     mGraphicsPipelines.destroy(rendererVk);
7976     mComputePipeline.destroy(rendererVk->getDevice());
7977     for (BindingPointer<ShaderAndSerial> &shader : mShaders)
7978     {
7979         shader.reset();
7980     }
7981 }
7982 
release(ContextVk * contextVk)7983 void ShaderProgramHelper::release(ContextVk *contextVk)
7984 {
7985     mGraphicsPipelines.release(contextVk);
7986     contextVk->addGarbage(&mComputePipeline.get());
7987     for (BindingPointer<ShaderAndSerial> &shader : mShaders)
7988     {
7989         shader.reset();
7990     }
7991 }
7992 
// Attach (taking a reference to) the shader module used for |shaderType|.
void ShaderProgramHelper::setShader(gl::ShaderType shaderType, RefCounted<ShaderAndSerial> *shader)
{
    mShaders[shaderType].set(shader);
}
7997 
// Record the value of one specialization constant in mSpecializationConstants.
// NOTE(review): presumably these are baked into pipelines at creation time, so
// calls after pipeline creation would have no effect — confirm with callers.
void ShaderProgramHelper::setSpecializationConstant(sh::vk::SpecializationConstantId id,
                                                    uint32_t value)
{
    ASSERT(id < sh::vk::SpecializationConstantId::EnumCount);
    switch (id)
    {
        case sh::vk::SpecializationConstantId::LineRasterEmulation:
            mSpecializationConstants.lineRasterEmulation = value;
            break;
        case sh::vk::SpecializationConstantId::SurfaceRotation:
            mSpecializationConstants.surfaceRotation = value;
            break;
        case sh::vk::SpecializationConstantId::DrawableWidth:
            // Drawable dimensions are stored as floats.
            mSpecializationConstants.drawableWidth = static_cast<float>(value);
            break;
        case sh::vk::SpecializationConstantId::DrawableHeight:
            mSpecializationConstants.drawableHeight = static_cast<float>(value);
            break;
        default:
            UNREACHABLE();
            break;
    }
}
8021 
getComputePipeline(Context * context,const PipelineLayout & pipelineLayout,PipelineAndSerial ** pipelineOut)8022 angle::Result ShaderProgramHelper::getComputePipeline(Context *context,
8023                                                       const PipelineLayout &pipelineLayout,
8024                                                       PipelineAndSerial **pipelineOut)
8025 {
8026     if (mComputePipeline.valid())
8027     {
8028         *pipelineOut = &mComputePipeline;
8029         return angle::Result::Continue;
8030     }
8031 
8032     RendererVk *renderer = context->getRenderer();
8033 
8034     VkPipelineShaderStageCreateInfo shaderStage = {};
8035     VkComputePipelineCreateInfo createInfo      = {};
8036 
8037     shaderStage.sType               = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
8038     shaderStage.flags               = 0;
8039     shaderStage.stage               = VK_SHADER_STAGE_COMPUTE_BIT;
8040     shaderStage.module              = mShaders[gl::ShaderType::Compute].get().get().getHandle();
8041     shaderStage.pName               = "main";
8042     shaderStage.pSpecializationInfo = nullptr;
8043 
8044     createInfo.sType              = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
8045     createInfo.flags              = 0;
8046     createInfo.stage              = shaderStage;
8047     createInfo.layout             = pipelineLayout.getHandle();
8048     createInfo.basePipelineHandle = VK_NULL_HANDLE;
8049     createInfo.basePipelineIndex  = 0;
8050 
8051     PipelineCache *pipelineCache = nullptr;
8052     ANGLE_TRY(renderer->getPipelineCache(&pipelineCache));
8053     ANGLE_VK_TRY(context, mComputePipeline.get().initCompute(context->getDevice(), createInfo,
8054                                                              *pipelineCache));
8055 
8056     *pipelineOut = &mComputePipeline;
8057     return angle::Result::Continue;
8058 }
8059 
// ActiveHandleCounter implementation.
ActiveHandleCounter()8061 ActiveHandleCounter::ActiveHandleCounter() : mActiveCounts{}, mAllocatedCounts{} {}
8062 
8063 ActiveHandleCounter::~ActiveHandleCounter() = default;
8064 
// CommandBufferAccess implementation.
// Defaulted; the access lists start empty and need no explicit teardown here.
CommandBufferAccess::CommandBufferAccess()  = default;
CommandBufferAccess::~CommandBufferAccess() = default;
8068 
// Record a read access of |buffer| (access mask + pipeline stage) in the
// mReadBuffers list for the command buffer being assembled.
void CommandBufferAccess::onBufferRead(VkAccessFlags readAccessType,
                                       PipelineStage readStage,
                                       BufferHelper *buffer)
{
    // A buffer released to external usage must not be accessed here.
    ASSERT(!buffer->isReleasedToExternal());
    mReadBuffers.emplace_back(buffer, readAccessType, readStage);
}
8076 
// Record a write access of |buffer| (access mask + pipeline stage) in the
// mWriteBuffers list for the command buffer being assembled.
void CommandBufferAccess::onBufferWrite(VkAccessFlags writeAccessType,
                                        PipelineStage writeStage,
                                        BufferHelper *buffer)
{
    // A buffer released to external usage must not be accessed here.
    ASSERT(!buffer->isReleasedToExternal());
    mWriteBuffers.emplace_back(buffer, writeAccessType, writeStage);
}
8084 
// Record a read access of |image| with the given aspects and expected layout
// in the mReadImages list.
void CommandBufferAccess::onImageRead(VkImageAspectFlags aspectFlags,
                                      ImageLayout imageLayout,
                                      ImageHelper *image)
{
    // An image released to external usage must not be accessed here, and it
    // must carry a valid serial.
    ASSERT(!image->isReleasedToExternal());
    ASSERT(image->getImageSerial().valid());
    mReadImages.emplace_back(image, aspectFlags, imageLayout);
}
8093 
// Record a write access of |image| covering the given level/layer range with
// the given aspects and expected layout in the mWriteImages list.
void CommandBufferAccess::onImageWrite(gl::LevelIndex levelStart,
                                       uint32_t levelCount,
                                       uint32_t layerStart,
                                       uint32_t layerCount,
                                       VkImageAspectFlags aspectFlags,
                                       ImageLayout imageLayout,
                                       ImageHelper *image)
{
    // An image released to external usage must not be accessed here, and it
    // must carry a valid serial.
    ASSERT(!image->isReleasedToExternal());
    ASSERT(image->getImageSerial().valid());
    // Unlike reads, writes also track the affected subresource range.
    mWriteImages.emplace_back(CommandBufferImageAccess{image, aspectFlags, imageLayout}, levelStart,
                              levelCount, layerStart, layerCount);
}
8107 
}  // namespace vk
}  // namespace rx
8110