// Copyright 2018 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "VkDeviceMemory.hpp"
#include "VkBuffer.hpp"
#include "VkImage.hpp"
#include "Device/Blitter.hpp"
#include "Device/Surface.hpp"
#include <cstring>

namespace vk
{

Image::Image(const VkImageCreateInfo* pCreateInfo, void* mem) :
	flags(pCreateInfo->flags),
	imageType(pCreateInfo->imageType),
	format(pCreateInfo->format),
	extent(pCreateInfo->extent),
	mipLevels(pCreateInfo->mipLevels),
	arrayLayers(pCreateInfo->arrayLayers),
	samples(pCreateInfo->samples),
	tiling(pCreateInfo->tiling)
{
	blitter = new sw::Blitter();
}

void Image::destroy(const VkAllocationCallbacks* pAllocator)
{
	delete blitter;
}

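// Returns the number of bytes that must be allocated in addition to the Image
// object itself. Presumably the Create() helper adds this to sizeof(Image); no
// extra storage is needed here because the texel data lives in the VkDeviceMemory
// bound later through vkBindImageMemory / bind().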
size_t Image::ComputeRequiredAllocationSize(const VkImageCreateInfo* pCreateInfo)
{
	return 0;
}

const VkMemoryRequirements Image::getMemoryRequirements() const
{
	VkMemoryRequirements memoryRequirements;
	memoryRequirements.alignment = vk::REQUIRED_MEMORY_ALIGNMENT;
	memoryRequirements.memoryTypeBits = vk::MEMORY_TYPE_GENERIC_BIT;
	memoryRequirements.size = getStorageSize(flags);
	return memoryRequirements;
}

void Image::bind(VkDeviceMemory pDeviceMemory, VkDeviceSize pMemoryOffset)
{
	deviceMemory = Cast(pDeviceMemory);
	memoryOffset = pMemoryOffset;
}

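// Describes how the requested subresource is laid out in the bound memory: its
// byte offset, total size, and the row, depth (slice) and array-layer pitches.
// A texel at (x, y, z) within the subresource then lives at
// offset + z * depthPitch + y * rowPitch + x * texelSize.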
void Image::getSubresourceLayout(const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout) const
{
	pLayout->offset = getMemoryOffset(flags, pSubresource->mipLevel, pSubresource->arrayLayer);
	pLayout->size = getMipLevelSize(flags, pSubresource->mipLevel);
	pLayout->rowPitch = rowPitchBytes(flags, pSubresource->mipLevel);
	pLayout->depthPitch = slicePitchBytes(flags, pSubresource->mipLevel);
	pLayout->arrayPitch = getLayerSize(flags);
}

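// Copies a region between two images with identical bytes per texel, using the
// widest memcpy possible: a single call when whole lines or planes are contiguous
// in both images, otherwise per-plane or per-line copies. For example, copying a
// full 64x64x1 mip level between images with equal row pitches hits the single
// "copy one plane" memcpy, while copying a 32x32 sub-rectangle of a 64x64 level
// falls back to the line-by-line loop.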
void Image::copyTo(VkImage dstImage, const VkImageCopy& pRegion)
{
	// Image copy does not perform any conversion; it simply copies memory from
	// an image to another image that has the same number of bytes per pixel.
	Image* dst = Cast(dstImage);
	int srcBytesPerTexel = bytesPerTexel(pRegion.srcSubresource.aspectMask);
	ASSERT(srcBytesPerTexel == dst->bytesPerTexel(pRegion.dstSubresource.aspectMask));

	if(!((pRegion.srcSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) ||
	     (pRegion.srcSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) ||
	     (pRegion.srcSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT)) ||
	   (pRegion.srcSubresource.baseArrayLayer != 0) ||
	   (pRegion.srcSubresource.layerCount != 1))
	{
		UNIMPLEMENTED();
	}

	if(!((pRegion.dstSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) ||
	     (pRegion.dstSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) ||
	     (pRegion.dstSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT)) ||
	   (pRegion.dstSubresource.baseArrayLayer != 0) ||
	   (pRegion.dstSubresource.layerCount != 1))
	{
		UNIMPLEMENTED();
	}

	const char* srcMem = static_cast<const char*>(getTexelPointer(pRegion.srcOffset, pRegion.srcSubresource));
	char* dstMem = static_cast<char*>(dst->getTexelPointer(pRegion.dstOffset, pRegion.dstSubresource));

	int srcRowPitchBytes = rowPitchBytes(pRegion.srcSubresource.aspectMask, pRegion.srcSubresource.mipLevel);
	int srcSlicePitchBytes = slicePitchBytes(pRegion.srcSubresource.aspectMask, pRegion.srcSubresource.mipLevel);
	int dstRowPitchBytes = dst->rowPitchBytes(pRegion.dstSubresource.aspectMask, pRegion.dstSubresource.mipLevel);
	int dstSlicePitchBytes = dst->slicePitchBytes(pRegion.dstSubresource.aspectMask, pRegion.dstSubresource.mipLevel);

	VkExtent3D srcExtent = getMipLevelExtent(pRegion.srcSubresource.mipLevel);
	VkExtent3D dstExtent = dst->getMipLevelExtent(pRegion.dstSubresource.mipLevel);

	bool isSinglePlane = (pRegion.extent.depth == 1);
	bool isSingleLine = (pRegion.extent.height == 1) && isSinglePlane;
	// In order to copy multiple lines using a single memcpy call, the entire
	// line must be copied and both the source and destination lines must have
	// the same length in bytes.
	bool isEntireLine = (pRegion.extent.width == srcExtent.width) &&
	                    (pRegion.extent.width == dstExtent.width) &&
	                    (srcRowPitchBytes == dstRowPitchBytes);
	// In order to copy multiple planes using a single memcpy call, the entire
	// plane must be copied and both the source and destination planes must have
	// the same length in bytes.
	bool isEntirePlane = isEntireLine &&
	                     (pRegion.extent.height == srcExtent.height) &&
	                     (pRegion.extent.height == dstExtent.height) &&
	                     (srcSlicePitchBytes == dstSlicePitchBytes);

	if(isSingleLine) // Copy one line
	{
		memcpy(dstMem, srcMem, pRegion.extent.width * srcBytesPerTexel);
	}
	else if(isEntireLine && isSinglePlane) // Copy one plane
	{
		memcpy(dstMem, srcMem, pRegion.extent.height * srcRowPitchBytes);
	}
	else if(isEntirePlane) // Copy multiple planes
	{
		memcpy(dstMem, srcMem, pRegion.extent.depth * srcSlicePitchBytes);
	}
	else if(isEntireLine) // Copy plane by plane
	{
		for(uint32_t z = 0; z < pRegion.extent.depth; z++, dstMem += dstSlicePitchBytes, srcMem += srcSlicePitchBytes)
		{
			memcpy(dstMem, srcMem, pRegion.extent.height * srcRowPitchBytes);
		}
	}
	else // Copy line by line
	{
		// Step the base pointers by the slice pitch per plane, and copy each
		// plane row by row from its own slice start.
		for(uint32_t z = 0; z < pRegion.extent.depth; z++, dstMem += dstSlicePitchBytes, srcMem += srcSlicePitchBytes)
		{
			const char* srcRow = srcMem;
			char* dstRow = dstMem;
			for(uint32_t y = 0; y < pRegion.extent.height; y++, dstRow += dstRowPitchBytes, srcRow += srcRowPitchBytes)
			{
				memcpy(dstRow, srcRow, pRegion.extent.width * srcBytesPerTexel);
			}
		}
	}
}

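// Shared implementation for buffer<->image copies. Per the Vulkan spec, a
// bufferRowLength or bufferImageHeight of zero means the buffer data is tightly
// packed according to imageExtent, which is how the buffer pitches below are
// derived. The same fast-path selection as copyTo(VkImage) picks the largest
// contiguous memcpy.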
void Image::copy(VkBuffer buffer, const VkBufferImageCopy& region, bool bufferIsSource)
{
	if(!((region.imageSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) ||
	     (region.imageSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) ||
	     (region.imageSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT)))
	{
		UNIMPLEMENTED();
	}

	VkExtent3D mipLevelExtent = getMipLevelExtent(region.imageSubresource.mipLevel);
	int imageBytesPerTexel = bytesPerTexel(region.imageSubresource.aspectMask);
	int imageRowPitchBytes = rowPitchBytes(region.imageSubresource.aspectMask, region.imageSubresource.mipLevel);
	int imageSlicePitchBytes = slicePitchBytes(region.imageSubresource.aspectMask, region.imageSubresource.mipLevel);
	int bufferRowPitchBytes = ((region.bufferRowLength == 0) ? region.imageExtent.width : region.bufferRowLength) *
	                          imageBytesPerTexel;
	int bufferSlicePitchBytes = ((region.bufferImageHeight == 0) || (region.bufferRowLength == 0)) ?
	                            region.imageExtent.height * bufferRowPitchBytes :
	                            (region.bufferImageHeight * region.bufferRowLength) * imageBytesPerTexel;

	int srcSlicePitchBytes = bufferIsSource ? bufferSlicePitchBytes : imageSlicePitchBytes;
	int dstSlicePitchBytes = bufferIsSource ? imageSlicePitchBytes : bufferSlicePitchBytes;
	int srcRowPitchBytes = bufferIsSource ? bufferRowPitchBytes : imageRowPitchBytes;
	int dstRowPitchBytes = bufferIsSource ? imageRowPitchBytes : bufferRowPitchBytes;

	bool isSinglePlane = (region.imageExtent.depth == 1);
	bool isSingleLine = (region.imageExtent.height == 1) && isSinglePlane;
	bool isEntireLine = (region.imageExtent.width == mipLevelExtent.width) &&
	                    (imageRowPitchBytes == bufferRowPitchBytes);
	bool isEntirePlane = isEntireLine && (region.imageExtent.height == mipLevelExtent.height) &&
	                     (imageSlicePitchBytes == bufferSlicePitchBytes);

	VkDeviceSize layerSize = getLayerSize(flags);
	char* bufferMemory = static_cast<char*>(Cast(buffer)->getOffsetPointer(region.bufferOffset));
	char* imageMemory = static_cast<char*>(deviceMemory->getOffsetPointer(
		getMemoryOffset(region.imageSubresource.aspectMask, region.imageSubresource.mipLevel,
		                region.imageSubresource.baseArrayLayer) +
		texelOffsetBytesInStorage(region.imageOffset, region.imageSubresource)));
	char* srcMemory = bufferIsSource ? bufferMemory : imageMemory;
	char* dstMemory = bufferIsSource ? imageMemory : bufferMemory;

	VkDeviceSize copySize = 0;
	if(isSingleLine)
	{
		copySize = region.imageExtent.width * imageBytesPerTexel;
	}
	else if(isEntireLine && isSinglePlane)
	{
		copySize = region.imageExtent.height * imageRowPitchBytes;
	}
	else if(isEntirePlane)
	{
		copySize = region.imageExtent.depth * imageSlicePitchBytes; // Copy multiple planes
	}
	else if(isEntireLine) // Copy plane by plane
	{
		copySize = region.imageExtent.height * imageRowPitchBytes;
	}
	else // Copy line by line
	{
		copySize = region.imageExtent.width * imageBytesPerTexel;
	}

	for(uint32_t i = 0; i < region.imageSubresource.layerCount; i++)
	{
		if(isSingleLine || (isEntireLine && isSinglePlane) || isEntirePlane)
		{
			memcpy(dstMemory, srcMemory, copySize);
		}
		else if(isEntireLine) // Copy plane by plane
		{
			for(uint32_t z = 0; z < region.imageExtent.depth; z++)
			{
				memcpy(dstMemory, srcMemory, copySize);
				srcMemory += srcSlicePitchBytes;
				dstMemory += dstSlicePitchBytes;
			}
		}
		else // Copy line by line
		{
			for(uint32_t z = 0; z < region.imageExtent.depth; z++)
			{
				for(uint32_t y = 0; y < region.imageExtent.height; y++)
				{
					memcpy(dstMemory, srcMemory, copySize);
					srcMemory += srcRowPitchBytes;
					dstMemory += dstRowPitchBytes;
				}
			}
		}

		srcMemory += layerSize;
		dstMemory += layerSize;
	}
}

void Image::copyTo(VkBuffer dstBuffer, const VkBufferImageCopy& region)
{
	copy(dstBuffer, region, false);
}

void Image::copyFrom(VkBuffer srcBuffer, const VkBufferImageCopy& region)
{
	copy(srcBuffer, region, true);
}

void* Image::getTexelPointer(const VkOffset3D& offset, const VkImageSubresourceLayers& subresource) const
{
	return deviceMemory->getOffsetPointer(texelOffsetBytesInStorage(offset, subresource) +
	       getMemoryOffset(flags, subresource.mipLevel, subresource.baseArrayLayer));
}

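// Byte offset of a texel within a single array layer's storage:
// z * slicePitch + y * rowPitch + x * bytesPerTexel.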
VkDeviceSize Image::texelOffsetBytesInStorage(const VkOffset3D& offset, const VkImageSubresourceLayers& subresource) const
{
	return offset.z * slicePitchBytes(flags, subresource.mipLevel) +
	       offset.y * rowPitchBytes(flags, subresource.mipLevel) +
	       offset.x * bytesPerTexel(flags);
}

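// Per the Vulkan spec, each dimension of mip level N is the base extent shifted
// right by N, clamped to a minimum of 1.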
VkExtent3D Image::getMipLevelExtent(uint32_t mipLevel) const
{
	VkExtent3D mipLevelExtent;
	mipLevelExtent.width = extent.width >> mipLevel;
	mipLevelExtent.height = extent.height >> mipLevel;
	mipLevelExtent.depth = extent.depth >> mipLevel;

	if(mipLevelExtent.width == 0)
	{
		mipLevelExtent.width = 1;
	}
	if(mipLevelExtent.height == 0)
	{
		mipLevelExtent.height = 1;
	}
	if(mipLevelExtent.depth == 0)
	{
		mipLevelExtent.depth = 1;
	}
	return mipLevelExtent;
}

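// Pitches are delegated to the legacy sw::Surface helpers. The second argument is
// a border size; presumably cube-compatible images get a 1-texel border so that
// sampling across cube edges has texels to read.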
int Image::rowPitchBytes(const VkImageAspectFlags& flags, uint32_t mipLevel) const
{
	// Depth and stencil pitches should be computed separately
	ASSERT((flags & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) !=
	       (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));
	return sw::Surface::pitchB(getMipLevelExtent(mipLevel).width, isCube() ? 1 : 0, getFormat(flags), false);
}

int Image::slicePitchBytes(const VkImageAspectFlags& flags, uint32_t mipLevel) const
{
	// Depth and stencil slice pitches should be computed separately
	ASSERT((flags & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) !=
	       (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));
	VkExtent3D mipLevelExtent = getMipLevelExtent(mipLevel);
	return sw::Surface::sliceB(mipLevelExtent.width, mipLevelExtent.height, isCube() ? 1 : 0, getFormat(flags), false);
}

int Image::bytesPerTexel(const VkImageAspectFlags& flags) const
{
	// Depth and stencil texel sizes should be computed separately
	ASSERT((flags & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) !=
	       (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));
	return sw::Surface::bytes(getFormat(flags));
}

VkFormat Image::getFormat(const VkImageAspectFlags& flags) const
{
	switch(flags)
	{
	case VK_IMAGE_ASPECT_DEPTH_BIT:
		switch(format)
		{
		case VK_FORMAT_D16_UNORM_S8_UINT:
			return VK_FORMAT_D16_UNORM;
		case VK_FORMAT_D24_UNORM_S8_UINT:
			return VK_FORMAT_X8_D24_UNORM_PACK32; // FIXME: This will allocate an extra byte per pixel
		case VK_FORMAT_D32_SFLOAT_S8_UINT:
			return VK_FORMAT_D32_SFLOAT;
		default:
			break;
		}
		break;
	case VK_IMAGE_ASPECT_STENCIL_BIT:
		switch(format)
		{
		case VK_FORMAT_D16_UNORM_S8_UINT:
		case VK_FORMAT_D24_UNORM_S8_UINT:
		case VK_FORMAT_D32_SFLOAT_S8_UINT:
			return VK_FORMAT_S8_UINT;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return format;
}

bool Image::isCube() const
{
	return (flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) && (imageType == VK_IMAGE_TYPE_2D);
}

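// Base memory offset of the requested aspect. For combined depth/stencil formats,
// the stencil data is stored immediately after all of the depth data, so the
// stencil aspect is offset by the depth aspect's storage size.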
VkDeviceSize Image::getMemoryOffset(const VkImageAspectFlags& flags) const
{
	switch(format)
	{
	case VK_FORMAT_D16_UNORM_S8_UINT:
	case VK_FORMAT_D24_UNORM_S8_UINT:
	case VK_FORMAT_D32_SFLOAT_S8_UINT:
		if(flags == VK_IMAGE_ASPECT_STENCIL_BIT)
		{
			// Offset by depth buffer to get to stencil buffer
			return memoryOffset + getStorageSize(VK_IMAGE_ASPECT_DEPTH_BIT);
		}
		break;
	default:
		break;
	}

	return memoryOffset;
}

VkDeviceSize Image::getMemoryOffset(const VkImageAspectFlags& flags, uint32_t mipLevel) const
{
	VkDeviceSize offset = getMemoryOffset(flags);
	for(uint32_t i = 0; i < mipLevel; ++i)
	{
		offset += getMipLevelSize(flags, i);
	}
	return offset;
}

VkDeviceSize Image::getMemoryOffset(const VkImageAspectFlags& flags, uint32_t mipLevel, uint32_t layer) const
{
	return layer * getLayerSize(flags) + getMemoryOffset(flags, mipLevel);
}

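// Size in bytes of one mip level for the requested aspect. When no single aspect
// of a combined depth/stencil format is selected, the depth and stencil slice
// pitches are summed so both aspects can be allocated contiguously.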
VkDeviceSize Image::getMipLevelSize(const VkImageAspectFlags& flags, uint32_t mipLevel) const
{
	int slicePitchB = 0;
	if(sw::Surface::isDepth(format) && sw::Surface::isStencil(format))
	{
		switch(flags)
		{
		case VK_IMAGE_ASPECT_DEPTH_BIT:
		case VK_IMAGE_ASPECT_STENCIL_BIT:
			slicePitchB = slicePitchBytes(flags, mipLevel);
			break;
		default:
			// Allow allocating both depth and stencil contiguously
			slicePitchB = (slicePitchBytes(VK_IMAGE_ASPECT_DEPTH_BIT, mipLevel) +
			               slicePitchBytes(VK_IMAGE_ASPECT_STENCIL_BIT, mipLevel));
			break;
		}
	}
	else
	{
		slicePitchB = slicePitchBytes(flags, mipLevel);
	}

	return getMipLevelExtent(mipLevel).depth * slicePitchB;
}

VkDeviceSize Image::getLayerSize(const VkImageAspectFlags& flags) const
{
	VkDeviceSize layerSize = 0;

	for(uint32_t mipLevel = 0; mipLevel < mipLevels; ++mipLevel)
	{
		layerSize += getMipLevelSize(flags, mipLevel);
	}

	return layerSize;
}

VkDeviceSize Image::getStorageSize(const VkImageAspectFlags& flags) const
{
	return arrayLayers * getLayerSize(flags);
}

sw::Surface* Image::asSurface(const VkImageAspectFlags& flags, uint32_t mipLevel, uint32_t layer) const
{
	VkExtent3D mipLevelExtent = getMipLevelExtent(mipLevel);
	return sw::Surface::create(mipLevelExtent.width, mipLevelExtent.height, mipLevelExtent.depth, getFormat(flags),
	                           deviceMemory->getOffsetPointer(getMemoryOffset(flags, mipLevel, layer)),
	                           rowPitchBytes(flags, mipLevel), slicePitchBytes(flags, mipLevel));
}

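// Blits by wrapping the source and destination subresources in temporary
// sw::Surface objects and invoking the legacy sw::Blitter one slice at a time.
// The blit options enable filtering unless VK_FILTER_NEAREST was requested and
// select the stencil path when blitting the stencil aspect.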
void Image::blit(VkImage dstImage, const VkImageBlit& region, VkFilter filter)
{
	VkImageAspectFlags srcFlags = region.srcSubresource.aspectMask;
	VkImageAspectFlags dstFlags = region.dstSubresource.aspectMask;
	if((region.srcSubresource.baseArrayLayer != 0) ||
	   (region.dstSubresource.baseArrayLayer != 0) ||
	   (region.srcSubresource.layerCount != 1) ||
	   (region.dstSubresource.layerCount != 1) ||
	   (srcFlags != dstFlags))
	{
		UNIMPLEMENTED();
	}

	int32_t numSlices = (region.srcOffsets[1].z - region.srcOffsets[0].z);
	ASSERT(numSlices == (region.dstOffsets[1].z - region.dstOffsets[0].z));

	sw::Surface* srcSurface = asSurface(srcFlags, region.srcSubresource.mipLevel, 0);
	sw::Surface* dstSurface = Cast(dstImage)->asSurface(dstFlags, region.dstSubresource.mipLevel, 0);

	sw::SliceRectF sRect(static_cast<float>(region.srcOffsets[0].x), static_cast<float>(region.srcOffsets[0].y),
	                     static_cast<float>(region.srcOffsets[1].x), static_cast<float>(region.srcOffsets[1].y),
	                     region.srcOffsets[0].z);

	sw::SliceRect dRect(region.dstOffsets[0].x, region.dstOffsets[0].y,
	                    region.dstOffsets[1].x, region.dstOffsets[1].y, region.dstOffsets[0].z);

	for(int i = 0; i < numSlices; i++)
	{
		blitter->blit(srcSurface, sRect, dstSurface, dRect,
		              {filter != VK_FILTER_NEAREST, srcFlags == VK_IMAGE_ASPECT_STENCIL_BIT, false});
		sRect.slice++;
		dRect.slice++;
	}

	delete srcSurface;
	delete dstSurface;
}

VkFormat Image::getClearFormat() const
{
	// Set the proper format for the clear value, as described here:
	// https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#clears-values
	if(sw::Surface::isSignedNonNormalizedInteger(format))
	{
		return VK_FORMAT_R32G32B32A32_SINT;
	}
	else if(sw::Surface::isUnsignedNonNormalizedInteger(format))
	{
		return VK_FORMAT_R32G32B32A32_UINT;
	}

	return VK_FORMAT_R32G32B32A32_SFLOAT;
}

uint32_t Image::getLastLayerIndex(const VkImageSubresourceRange& subresourceRange) const
{
	return ((subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS) ?
	        arrayLayers : (subresourceRange.baseArrayLayer + subresourceRange.layerCount)) - 1;
}

uint32_t Image::getLastMipLevel(const VkImageSubresourceRange& subresourceRange) const
{
	return ((subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS) ?
	        mipLevels : (subresourceRange.baseMipLevel + subresourceRange.levelCount)) - 1;
}

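// Clears every array layer, mip level and depth slice selected by the subresource
// range, one full slice rectangle at a time. The 0xF mask passed to the blitter
// presumably enables writes to all four color channels.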
void Image::clear(void* pixelData, VkFormat format, const VkImageSubresourceRange& subresourceRange, VkImageAspectFlags aspectMask)
{
	uint32_t firstLayer = subresourceRange.baseArrayLayer;
	uint32_t lastLayer = getLastLayerIndex(subresourceRange);
	for(uint32_t layer = firstLayer; layer <= lastLayer; ++layer)
	{
		uint32_t lastLevel = getLastMipLevel(subresourceRange);
		for(uint32_t mipLevel = subresourceRange.baseMipLevel; mipLevel <= lastLevel; ++mipLevel)
		{
			VkExtent3D mipLevelExtent = getMipLevelExtent(mipLevel);
			for(uint32_t s = 0; s < mipLevelExtent.depth; ++s)
			{
				const sw::SliceRect dRect(0, 0, mipLevelExtent.width, mipLevelExtent.height, s);
				sw::Surface* surface = asSurface(aspectMask, mipLevel, layer);
				blitter->clear(pixelData, format, surface, dRect, 0xF);
				delete surface;
			}
		}
	}
}

void Image::clear(void* pixelData, VkFormat format, const VkRect2D& renderArea, const VkImageSubresourceRange& subresourceRange, VkImageAspectFlags aspectMask)
{
	if((subresourceRange.baseMipLevel != 0) ||
	   (subresourceRange.levelCount != 1))
	{
		UNIMPLEMENTED();
	}

	sw::SliceRect dRect(renderArea.offset.x, renderArea.offset.y,
	                    renderArea.offset.x + renderArea.extent.width,
	                    renderArea.offset.y + renderArea.extent.height, 0);

	uint32_t firstLayer = subresourceRange.baseArrayLayer;
	uint32_t lastLayer = getLastLayerIndex(subresourceRange);
	for(uint32_t layer = firstLayer; layer <= lastLayer; ++layer)
	{
		for(uint32_t s = 0; s < extent.depth; ++s)
		{
			dRect.slice = s;
			sw::Surface* surface = asSurface(aspectMask, 0, layer);
			blitter->clear(pixelData, format, surface, dRect, 0xF);
			delete surface;
		}
	}
}

void Image::clear(const VkClearColorValue& color, const VkImageSubresourceRange& subresourceRange)
{
	if(subresourceRange.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT)
	{
		UNIMPLEMENTED();
	}

	clear((void*)color.float32, getClearFormat(), subresourceRange, VK_IMAGE_ASPECT_COLOR_BIT);
}

void Image::clear(const VkClearDepthStencilValue& color, const VkImageSubresourceRange& subresourceRange)
{
	if((subresourceRange.aspectMask & ~(VK_IMAGE_ASPECT_DEPTH_BIT |
	                                    VK_IMAGE_ASPECT_STENCIL_BIT)) != 0)
	{
		UNIMPLEMENTED();
	}

	if(subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT)
	{
		clear((void*)(&color.depth), VK_FORMAT_D32_SFLOAT, subresourceRange, VK_IMAGE_ASPECT_DEPTH_BIT);
	}

	if(subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)
	{
		clear((void*)(&color.stencil), VK_FORMAT_S8_UINT, subresourceRange, VK_IMAGE_ASPECT_STENCIL_BIT);
	}
}

void Image::clear(const VkClearValue& clearValue, const VkRect2D& renderArea, const VkImageSubresourceRange& subresourceRange)
{
	if(!((subresourceRange.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) ||
	     (subresourceRange.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT |
	                                     VK_IMAGE_ASPECT_STENCIL_BIT))) ||
	   (subresourceRange.baseMipLevel != 0) ||
	   (subresourceRange.levelCount != 1))
	{
		UNIMPLEMENTED();
	}

	if(subresourceRange.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT)
	{
		clear((void*)(clearValue.color.float32), getClearFormat(), renderArea, subresourceRange, VK_IMAGE_ASPECT_COLOR_BIT);
	}
	else
	{
		if(subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT)
		{
			clear((void*)(&clearValue.depthStencil.depth), VK_FORMAT_D32_SFLOAT, renderArea, subresourceRange, VK_IMAGE_ASPECT_DEPTH_BIT);
		}

		if(subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)
		{
			clear((void*)(&clearValue.depthStencil.stencil), VK_FORMAT_S8_UINT, renderArea, subresourceRange, VK_IMAGE_ASPECT_STENCIL_BIT);
		}
	}
}

} // namespace vk