1 /*
2  * Copyright © 2017 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "wsi_common_private.h"
25 #include "wsi_common_entrypoints.h"
26 #include "util/u_debug.h"
27 #include "util/macros.h"
28 #include "util/os_file.h"
29 #include "util/os_time.h"
30 #include "util/xmlconfig.h"
31 #include "vk_device.h"
32 #include "vk_fence.h"
33 #include "vk_format.h"
34 #include "vk_instance.h"
35 #include "vk_physical_device.h"
36 #include "vk_queue.h"
37 #include "vk_semaphore.h"
38 #include "vk_sync.h"
39 #include "vk_sync_dummy.h"
40 #include "vk_util.h"
41 
42 #include <time.h>
43 #include <stdlib.h>
44 #include <stdio.h>
45 
46 #ifndef _WIN32
47 #include <unistd.h>
48 #endif
49 
/* Global bitmask of WSI debug flags, parsed once from the
 * MESA_VK_WSI_DEBUG environment variable in wsi_device_init(). */
uint64_t WSI_DEBUG;

/* Maps MESA_VK_WSI_DEBUG comma-separated tokens to WSI_DEBUG bits for
 * parse_debug_string(); the table is terminated by a NULL-name entry. */
static const struct debug_control debug_control[] = {
   { "buffer",       WSI_DEBUG_BUFFER },
   { "sw",           WSI_DEBUG_SW },
   { "noshm",        WSI_DEBUG_NOSHM },
   { "linear",       WSI_DEBUG_LINEAR },
   { "dxgi",         WSI_DEBUG_DXGI },
   { NULL, },
};
60 
/*
 * Initialize the driver-independent WSI layer for one physical device.
 *
 * wsi:            caller-provided storage, zeroed and filled here.
 * pdevice:        the physical device this WSI instance serves.
 * proc_addr:      resolves every Vulkan entrypoint WSI needs, so WSI
 *                 never calls into the driver directly.
 * alloc:          instance-level allocation callbacks, copied by value.
 * display_fd:     fd handed to the display backend (KHR_display).
 * dri_options:    optional driconf cache for per-application options.
 * device_options: driver-supplied knobs (software device, extra
 *                 Xwayland image, ...).
 *
 * Returns VK_SUCCESS, or the first platform backend's failure code, in
 * which case everything initialized so far is torn down again via
 * wsi_device_finish().
 */
VkResult
wsi_device_init(struct wsi_device *wsi,
                VkPhysicalDevice pdevice,
                WSI_FN_GetPhysicalDeviceProcAddr proc_addr,
                const VkAllocationCallbacks *alloc,
                int display_fd,
                const struct driOptionCache *dri_options,
                const struct wsi_device_options *device_options)
{
   const char *present_mode;
   /* UNUSED: when no platform backend is compiled in, none of the
    * assignments below survive preprocessing. */
   UNUSED VkResult result;

   /* Parse MESA_VK_WSI_DEBUG once into the global flag mask. */
   WSI_DEBUG = parse_debug_string(getenv("MESA_VK_WSI_DEBUG"), debug_control);

   util_cpu_trace_init();

   memset(wsi, 0, sizeof(*wsi));

   wsi->instance_alloc = *alloc;
   wsi->pdevice = pdevice;
   wsi->supports_scanout = true;
   /* Software presentation can be forced either by the driver or by the
    * "sw" debug flag. */
   wsi->sw = device_options->sw_device || (WSI_DEBUG & WSI_DEBUG_SW);
   wsi->wants_linear = (WSI_DEBUG & WSI_DEBUG_LINEAR) != 0;
   wsi->x11.extra_xwayland_image = device_options->extra_xwayland_image;

   /* Resolve physical-device-level entrypoints into locals; they are
    * only needed during init. */
#define WSI_GET_CB(func) \
   PFN_vk##func func = (PFN_vk##func)proc_addr(pdevice, "vk" #func)
   WSI_GET_CB(GetPhysicalDeviceExternalSemaphoreProperties);
   WSI_GET_CB(GetPhysicalDeviceProperties2);
   WSI_GET_CB(GetPhysicalDeviceMemoryProperties);
   WSI_GET_CB(GetPhysicalDeviceQueueFamilyProperties);
#undef WSI_GET_CB

   /* Query DRM and PCI bus properties alongside the core limits in a
    * single GetPhysicalDeviceProperties2 call by chaining them. */
   wsi->drm_info.sType =
      VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRM_PROPERTIES_EXT;
   wsi->pci_bus_info.sType =
      VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT;
   wsi->pci_bus_info.pNext = &wsi->drm_info;
   VkPhysicalDeviceProperties2 pdp2 = {
      .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
      .pNext = &wsi->pci_bus_info,
   };
   GetPhysicalDeviceProperties2(pdevice, &pdp2);

   wsi->maxImageDimension2D = pdp2.properties.limits.maxImageDimension2D;
   /* The Vulkan limit is a VkDeviceSize; WSI stores it narrowed. */
   assert(pdp2.properties.limits.optimalBufferCopyRowPitchAlignment <= UINT32_MAX);
   wsi->optimalBufferCopyRowPitchAlignment =
      pdp2.properties.limits.optimalBufferCopyRowPitchAlignment;
   /* MAX_ENUM means "no override"; possibly replaced below from the
    * MESA_VK_WSI_PRESENT_MODE environment variable. */
   wsi->override_present_mode = VK_PRESENT_MODE_MAX_ENUM_KHR;

   GetPhysicalDeviceMemoryProperties(pdevice, &wsi->memory_props);
   GetPhysicalDeviceQueueFamilyProperties(pdevice, &wsi->queue_family_count, NULL);

   /* Record which external-semaphore handle types the device can
    * export, walking every single-bit type up to SYNC_FD. */
   for (VkExternalSemaphoreHandleTypeFlags handle_type = 1;
        handle_type <= VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
        handle_type <<= 1) {
      const VkPhysicalDeviceExternalSemaphoreInfo esi = {
         .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO,
         .handleType = handle_type,
      };
      VkExternalSemaphoreProperties esp = {
         .sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES,
      };
      GetPhysicalDeviceExternalSemaphoreProperties(pdevice, &esi, &esp);

      if (esp.externalSemaphoreFeatures &
          VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT)
         wsi->semaphore_export_handle_types |= handle_type;
   }

   const struct vk_device_extension_table *supported_extensions =
      &vk_physical_device_from_handle(pdevice)->supported_extensions;
   wsi->has_import_memory_host =
      supported_extensions->EXT_external_memory_host;
   wsi->khr_present_wait =
      supported_extensions->KHR_present_id &&
      supported_extensions->KHR_present_wait;

   /* We cannot expose KHR_present_wait without timeline semaphores. */
   assert(!wsi->khr_present_wait || supported_extensions->KHR_timeline_semaphore);

   list_inithead(&wsi->hotplug_fences);

   /* Resolve the device-level entrypoints used throughout WSI and stash
    * them on the wsi_device itself. */
#define WSI_GET_CB(func) \
   wsi->func = (PFN_vk##func)proc_addr(pdevice, "vk" #func)
   WSI_GET_CB(AllocateMemory);
   WSI_GET_CB(AllocateCommandBuffers);
   WSI_GET_CB(BindBufferMemory);
   WSI_GET_CB(BindImageMemory);
   WSI_GET_CB(BeginCommandBuffer);
   WSI_GET_CB(CmdPipelineBarrier);
   WSI_GET_CB(CmdCopyImage);
   WSI_GET_CB(CmdCopyImageToBuffer);
   WSI_GET_CB(CreateBuffer);
   WSI_GET_CB(CreateCommandPool);
   WSI_GET_CB(CreateFence);
   WSI_GET_CB(CreateImage);
   WSI_GET_CB(CreateSemaphore);
   WSI_GET_CB(DestroyBuffer);
   WSI_GET_CB(DestroyCommandPool);
   WSI_GET_CB(DestroyFence);
   WSI_GET_CB(DestroyImage);
   WSI_GET_CB(DestroySemaphore);
   WSI_GET_CB(EndCommandBuffer);
   WSI_GET_CB(FreeMemory);
   WSI_GET_CB(FreeCommandBuffers);
   WSI_GET_CB(GetBufferMemoryRequirements);
   WSI_GET_CB(GetFenceStatus);
   WSI_GET_CB(GetImageDrmFormatModifierPropertiesEXT);
   WSI_GET_CB(GetImageMemoryRequirements);
   WSI_GET_CB(GetImageSubresourceLayout);
   /* dma-buf export is only meaningful for hardware paths. */
   if (!wsi->sw)
      WSI_GET_CB(GetMemoryFdKHR);
   WSI_GET_CB(GetPhysicalDeviceFormatProperties);
   WSI_GET_CB(GetPhysicalDeviceFormatProperties2KHR);
   WSI_GET_CB(GetPhysicalDeviceImageFormatProperties2);
   WSI_GET_CB(GetSemaphoreFdKHR);
   WSI_GET_CB(ResetFences);
   WSI_GET_CB(QueueSubmit);
   WSI_GET_CB(WaitForFences);
   WSI_GET_CB(MapMemory);
   WSI_GET_CB(UnmapMemory);
   if (wsi->khr_present_wait)
      WSI_GET_CB(WaitSemaphoresKHR);
#undef WSI_GET_CB

   /* Initialize every platform backend compiled into this build; any
    * failure unwinds the whole device. */
#ifdef VK_USE_PLATFORM_XCB_KHR
   result = wsi_x11_init_wsi(wsi, alloc, dri_options);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   result = wsi_wl_init_wsi(wsi, alloc, pdevice);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_WIN32_KHR
   result = wsi_win32_init_wsi(wsi, alloc, pdevice);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifdef VK_USE_PLATFORM_DISPLAY_KHR
   result = wsi_display_init_wsi(wsi, alloc, display_fd);
   if (result != VK_SUCCESS)
      goto fail;
#endif

#ifndef VK_USE_PLATFORM_WIN32_KHR
   result = wsi_headless_init_wsi(wsi, alloc, pdevice);
   if (result != VK_SUCCESS)
      goto fail;
#endif

   /* Optional user override of the present mode for all swapchains. */
   present_mode = getenv("MESA_VK_WSI_PRESENT_MODE");
   if (present_mode) {
      if (!strcmp(present_mode, "fifo")) {
         wsi->override_present_mode = VK_PRESENT_MODE_FIFO_KHR;
      } else if (!strcmp(present_mode, "relaxed")) {
          wsi->override_present_mode = VK_PRESENT_MODE_FIFO_RELAXED_KHR;
      } else if (!strcmp(present_mode, "mailbox")) {
         wsi->override_present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
      } else if (!strcmp(present_mode, "immediate")) {
         wsi->override_present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
      } else {
         fprintf(stderr, "Invalid MESA_VK_WSI_PRESENT_MODE value!\n");
      }
   }

   wsi->force_headless_swapchain =
      debug_get_bool_option("MESA_VK_WSI_HEADLESS_SWAPCHAIN", false);

   /* Per-application driconf workarounds, when the driver supplied a
    * parsed option cache. */
   if (dri_options) {
      if (driCheckOption(dri_options, "adaptive_sync", DRI_BOOL))
         wsi->enable_adaptive_sync = driQueryOptionb(dri_options,
                                                     "adaptive_sync");

      if (driCheckOption(dri_options, "vk_wsi_force_bgra8_unorm_first",  DRI_BOOL)) {
         wsi->force_bgra8_unorm_first =
            driQueryOptionb(dri_options, "vk_wsi_force_bgra8_unorm_first");
      }

      if (driCheckOption(dri_options, "vk_wsi_force_swapchain_to_current_extent",  DRI_BOOL)) {
         wsi->force_swapchain_to_currentExtent =
            driQueryOptionb(dri_options, "vk_wsi_force_swapchain_to_current_extent");
      }
   }

   return VK_SUCCESS;
fail:
   /* Tear down any backends that did initialize before the failure. */
   wsi_device_finish(wsi, alloc);
   return result;
}
255 
/*
 * Tear down every platform backend that wsi_device_init() set up.
 *
 * wsi_device_init()'s error path calls this on a partially-initialized
 * device, so each backend's finish function is expected to tolerate
 * its own init not having run to completion.
 */
void
wsi_device_finish(struct wsi_device *wsi,
                  const VkAllocationCallbacks *alloc)
{
#ifndef VK_USE_PLATFORM_WIN32_KHR
   wsi_headless_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_DISPLAY_KHR
   wsi_display_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   wsi_wl_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
   wsi_win32_finish_wsi(wsi, alloc);
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
   wsi_x11_finish_wsi(wsi, alloc);
#endif
}
276 
/*
 * Implementation of vkDestroySurfaceKHR.
 *
 * Wayland and Win32 surfaces own additional platform state and are
 * destroyed by their dedicated destructors; any other platform's
 * surface is a plain allocation freed directly here.  NULL surfaces
 * are a no-op, per the Vulkan spec.
 */
VKAPI_ATTR void VKAPI_CALL
wsi_DestroySurfaceKHR(VkInstance _instance,
                      VkSurfaceKHR _surface,
                      const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(vk_instance, instance, _instance);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);

   if (!surface)
      return;

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   if (surface->platform == VK_ICD_WSI_PLATFORM_WAYLAND) {
      wsi_wl_surface_destroy(surface, _instance, pAllocator);
      return;
   }
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
   if (surface->platform == VK_ICD_WSI_PLATFORM_WIN32) {
      wsi_win32_surface_destroy(surface, _instance, pAllocator);
      return;
   }
#endif

   /* Fall back to the instance allocator (or pAllocator if given). */
   vk_free2(&instance->alloc, pAllocator, surface);
}
303 
/*
 * Hand the driver's syncobj-capable fd to the backends that need one;
 * currently only the display (KHR_display) backend consumes it.
 */
void
wsi_device_setup_syncobj_fd(struct wsi_device *wsi_device,
                            int fd)
{
#ifdef VK_USE_PLATFORM_DISPLAY_KHR
   wsi_display_setup_syncobj_fd(wsi_device, fd);
#endif
}
312 
/*
 * Decide whether presentable images of the given type must go through
 * an intermediate blit before presentation, and of which kind, by
 * delegating to the image-type-specific predicate.
 */
static enum wsi_swapchain_blit_type
get_blit_type(const struct wsi_device *wsi,
              const struct wsi_base_image_params *params,
              VkDevice device)
{
   switch (params->image_type) {
   case WSI_IMAGE_TYPE_CPU: {
      const struct wsi_cpu_image_params *cpu_params =
         container_of(params, const struct wsi_cpu_image_params, base);
      return wsi_cpu_image_needs_buffer_blit(wsi, cpu_params) ?
         WSI_SWAPCHAIN_BUFFER_BLIT : WSI_SWAPCHAIN_NO_BLIT;
   }
#ifdef HAVE_LIBDRM
   case WSI_IMAGE_TYPE_DRM: {
      const struct wsi_drm_image_params *drm_params =
         container_of(params, const struct wsi_drm_image_params, base);
      return wsi_drm_image_needs_buffer_blit(wsi, drm_params) ?
         WSI_SWAPCHAIN_BUFFER_BLIT : WSI_SWAPCHAIN_NO_BLIT;
   }
#endif
#ifdef _WIN32
   case WSI_IMAGE_TYPE_DXGI: {
      const struct wsi_dxgi_image_params *dxgi_params =
         container_of(params, const struct wsi_dxgi_image_params, base);
      /* DXGI decides the blit type itself (may need the device). */
      return wsi_dxgi_image_needs_blit(wsi, dxgi_params, device);
   }
#endif
   default:
      unreachable("Invalid image type");
   }
}
344 
/*
 * Dispatch swapchain image configuration to the image-type-specific
 * path, which fills *info with creation parameters and the create_mem /
 * finish_create hooks used later by wsi_create_image().
 */
static VkResult
configure_image(const struct wsi_swapchain *chain,
                const VkSwapchainCreateInfoKHR *pCreateInfo,
                const struct wsi_base_image_params *params,
                struct wsi_image_info *info)
{
   switch (params->image_type) {
   case WSI_IMAGE_TYPE_CPU: {
      const struct wsi_cpu_image_params *cpu_params =
         container_of(params, const struct wsi_cpu_image_params, base);
      return wsi_configure_cpu_image(chain, pCreateInfo, cpu_params, info);
   }
#ifdef HAVE_LIBDRM
   case WSI_IMAGE_TYPE_DRM: {
      const struct wsi_drm_image_params *drm_params =
         container_of(params, const struct wsi_drm_image_params, base);
      return wsi_drm_configure_image(chain, pCreateInfo, drm_params, info);
   }
#endif
#ifdef _WIN32
   case WSI_IMAGE_TYPE_DXGI: {
      const struct wsi_dxgi_image_params *dxgi_params =
         container_of(params, const struct wsi_dxgi_image_params, base);
      return wsi_dxgi_configure_image(chain, pCreateInfo, dxgi_params, info);
   }
#endif
   default:
      unreachable("Invalid image type");
   }
}
375 
376 #if defined(HAVE_PTHREAD) && !defined(_WIN32)
/*
 * Initialize *cond as a condition variable that measures timed waits
 * against CLOCK_MONOTONIC, so they are immune to wall-clock changes.
 * Returns true on success, false if any pthread call fails.
 */
bool
wsi_init_pthread_cond_monotonic(pthread_cond_t *cond)
{
   pthread_condattr_t attr;

   if (pthread_condattr_init(&attr) != 0)
      return false;

   /* Only attempt the cond init if the clock attribute was applied;
    * the && short-circuits exactly like the step-by-step checks. */
   const bool ok =
      pthread_condattr_setclock(&attr, CLOCK_MONOTONIC) == 0 &&
      pthread_cond_init(cond, &attr) == 0;

   /* The attribute object is no longer needed either way. */
   pthread_condattr_destroy(&attr);
   return ok;
}
400 #endif
401 
402 VkResult
wsi_swapchain_init(const struct wsi_device * wsi,struct wsi_swapchain * chain,VkDevice _device,const VkSwapchainCreateInfoKHR * pCreateInfo,const struct wsi_base_image_params * image_params,const VkAllocationCallbacks * pAllocator)403 wsi_swapchain_init(const struct wsi_device *wsi,
404                    struct wsi_swapchain *chain,
405                    VkDevice _device,
406                    const VkSwapchainCreateInfoKHR *pCreateInfo,
407                    const struct wsi_base_image_params *image_params,
408                    const VkAllocationCallbacks *pAllocator)
409 {
410    VK_FROM_HANDLE(vk_device, device, _device);
411    VkResult result;
412 
413    memset(chain, 0, sizeof(*chain));
414 
415    vk_object_base_init(device, &chain->base, VK_OBJECT_TYPE_SWAPCHAIN_KHR);
416 
417    chain->wsi = wsi;
418    chain->device = _device;
419    chain->alloc = *pAllocator;
420    chain->blit.type = get_blit_type(wsi, image_params, _device);
421 
422    chain->blit.queue = VK_NULL_HANDLE;
423    if (chain->blit.type != WSI_SWAPCHAIN_NO_BLIT && wsi->get_blit_queue)
424       chain->blit.queue = wsi->get_blit_queue(_device);
425 
426    int cmd_pools_count = chain->blit.queue != VK_NULL_HANDLE ? 1 : wsi->queue_family_count;
427 
428    chain->cmd_pools =
429       vk_zalloc(pAllocator, sizeof(VkCommandPool) * cmd_pools_count, 8,
430                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
431    if (!chain->cmd_pools)
432       return VK_ERROR_OUT_OF_HOST_MEMORY;
433 
434    for (uint32_t i = 0; i < cmd_pools_count; i++) {
435       int queue_family_index = i;
436 
437       if (chain->blit.queue != VK_NULL_HANDLE) {
438          VK_FROM_HANDLE(vk_queue, queue, chain->blit.queue);
439          queue_family_index = queue->queue_family_index;
440       }
441       const VkCommandPoolCreateInfo cmd_pool_info = {
442          .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
443          .pNext = NULL,
444          .flags = 0,
445          .queueFamilyIndex = queue_family_index,
446       };
447       result = wsi->CreateCommandPool(_device, &cmd_pool_info, &chain->alloc,
448                                       &chain->cmd_pools[i]);
449       if (result != VK_SUCCESS)
450          goto fail;
451    }
452 
453    result = configure_image(chain, pCreateInfo, image_params,
454                             &chain->image_info);
455    if (result != VK_SUCCESS)
456       goto fail;
457 
458    return VK_SUCCESS;
459 
460 fail:
461    wsi_swapchain_finish(chain);
462    return result;
463 }
464 
465 static bool
wsi_swapchain_is_present_mode_supported(struct wsi_device * wsi,const VkSwapchainCreateInfoKHR * pCreateInfo,VkPresentModeKHR mode)466 wsi_swapchain_is_present_mode_supported(struct wsi_device *wsi,
467                                         const VkSwapchainCreateInfoKHR *pCreateInfo,
468                                         VkPresentModeKHR mode)
469 {
470       ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
471       struct wsi_interface *iface = wsi->wsi[surface->platform];
472       VkPresentModeKHR *present_modes;
473       uint32_t present_mode_count;
474       bool supported = false;
475       VkResult result;
476 
477       result = iface->get_present_modes(surface, wsi, &present_mode_count, NULL);
478       if (result != VK_SUCCESS)
479          return supported;
480 
481       present_modes = malloc(present_mode_count * sizeof(*present_modes));
482       if (!present_modes)
483          return supported;
484 
485       result = iface->get_present_modes(surface, wsi, &present_mode_count,
486                                         present_modes);
487       if (result != VK_SUCCESS)
488          goto fail;
489 
490       for (uint32_t i = 0; i < present_mode_count; i++) {
491          if (present_modes[i] == mode) {
492             supported = true;
493             break;
494          }
495       }
496 
497 fail:
498       free(present_modes);
499       return supported;
500 }
501 
502 enum VkPresentModeKHR
wsi_swapchain_get_present_mode(struct wsi_device * wsi,const VkSwapchainCreateInfoKHR * pCreateInfo)503 wsi_swapchain_get_present_mode(struct wsi_device *wsi,
504                                const VkSwapchainCreateInfoKHR *pCreateInfo)
505 {
506    if (wsi->override_present_mode == VK_PRESENT_MODE_MAX_ENUM_KHR)
507       return pCreateInfo->presentMode;
508 
509    if (!wsi_swapchain_is_present_mode_supported(wsi, pCreateInfo,
510                                                 wsi->override_present_mode)) {
511       fprintf(stderr, "Unsupported MESA_VK_WSI_PRESENT_MODE value!\n");
512       return pCreateInfo->presentMode;
513    }
514 
515    return wsi->override_present_mode;
516 }
517 
/*
 * Destroy everything owned by a swapchain that wsi_swapchain_init() and
 * the common image machinery created: image configuration arrays,
 * per-image fences and blit semaphores, the dma-buf / present-id
 * semaphores, and the command pools.
 *
 * Tolerates partially-constructed swapchains: the per-image arrays may
 * be NULL, and vkDestroySemaphore / vkDestroyCommandPool accept
 * VK_NULL_HANDLE per the Vulkan spec.
 */
void
wsi_swapchain_finish(struct wsi_swapchain *chain)
{
   wsi_destroy_image_info(chain, &chain->image_info);

   if (chain->fences) {
      for (unsigned i = 0; i < chain->image_count; i++)
         chain->wsi->DestroyFence(chain->device, chain->fences[i], &chain->alloc);

      vk_free(&chain->alloc, chain->fences);
   }
   if (chain->blit.semaphores) {
      for (unsigned i = 0; i < chain->image_count; i++)
         chain->wsi->DestroySemaphore(chain->device, chain->blit.semaphores[i], &chain->alloc);

      vk_free(&chain->alloc, chain->blit.semaphores);
   }
   chain->wsi->DestroySemaphore(chain->device, chain->dma_buf_semaphore,
                                &chain->alloc);
   chain->wsi->DestroySemaphore(chain->device, chain->present_id_timeline,
                                &chain->alloc);

   /* Must mirror the pool-count logic in wsi_swapchain_init(): one pool
    * for a dedicated blit queue, else one per queue family. */
   int cmd_pools_count = chain->blit.queue != VK_NULL_HANDLE ?
      1 : chain->wsi->queue_family_count;
   for (uint32_t i = 0; i < cmd_pools_count; i++) {
      chain->wsi->DestroyCommandPool(chain->device, chain->cmd_pools[i],
                                     &chain->alloc);
   }
   vk_free(&chain->alloc, chain->cmd_pools);

   vk_object_base_finish(&chain->base);
}
550 
/*
 * Fill *info with the common VkImageCreateInfo (and its pNext chain)
 * used for swapchain images: 2D, single-mip, single-layer,
 * single-sample, optimal tiling, with format/extent/usage/sharing taken
 * from the swapchain create info.
 *
 * handle_types, when non-zero, chains a VkExternalMemoryImageCreateInfo
 * so the image memory can be exported with those handle types.
 *
 * On success the caller owns the arrays hung off *info and must release
 * them with wsi_destroy_image_info(); on OOM that cleanup has already
 * been done (safe because *info is zeroed first).
 */
VkResult
wsi_configure_image(const struct wsi_swapchain *chain,
                    const VkSwapchainCreateInfoKHR *pCreateInfo,
                    VkExternalMemoryHandleTypeFlags handle_types,
                    struct wsi_image_info *info)
{
   memset(info, 0, sizeof(*info));
   uint32_t queue_family_count = 1;

   if (pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT)
      queue_family_count = pCreateInfo->queueFamilyIndexCount;

   /*
    * TODO: there should be no reason to allocate this (the create info
    * could point at the caller's array), but issue 15331 shows that
    * games crashed without doing this.
    */
   uint32_t *queue_family_indices =
      vk_alloc(&chain->alloc,
               sizeof(*queue_family_indices) *
               queue_family_count,
               8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!queue_family_indices)
      goto err_oom;

   /* In EXCLUSIVE mode the indices are ignored by the spec, so they
    * are only copied for CONCURRENT sharing. */
   if (pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT)
      for (uint32_t i = 0; i < pCreateInfo->queueFamilyIndexCount; i++)
         queue_family_indices[i] = pCreateInfo->pQueueFamilyIndices[i];

   info->create = (VkImageCreateInfo) {
      .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
      /* ALIAS_BIT: the same memory may back a blit image/buffer. */
      .flags = VK_IMAGE_CREATE_ALIAS_BIT,
      .imageType = VK_IMAGE_TYPE_2D,
      .format = pCreateInfo->imageFormat,
      .extent = {
         .width = pCreateInfo->imageExtent.width,
         .height = pCreateInfo->imageExtent.height,
         .depth = 1,
      },
      .mipLevels = 1,
      .arrayLayers = 1,
      .samples = VK_SAMPLE_COUNT_1_BIT,
      .tiling = VK_IMAGE_TILING_OPTIMAL,
      .usage = pCreateInfo->imageUsage,
      .sharingMode = pCreateInfo->imageSharingMode,
      .queueFamilyIndexCount = queue_family_count,
      .pQueueFamilyIndices = queue_family_indices,
      .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
   };

   if (handle_types != 0) {
      info->ext_mem = (VkExternalMemoryImageCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
         .handleTypes = handle_types,
      };
      __vk_append_struct(&info->create, &info->ext_mem);
   }

   /* Mark the image as a WSI image for the driver. */
   info->wsi = (struct wsi_image_create_info) {
      .sType = VK_STRUCTURE_TYPE_WSI_IMAGE_CREATE_INFO_MESA,
   };
   __vk_append_struct(&info->create, &info->wsi);

   /* Mutable-format swapchains need a copy of the app's view-format
    * list chained onto the image create info. */
   if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) {
      info->create.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT |
                            VK_IMAGE_CREATE_EXTENDED_USAGE_BIT;

      const VkImageFormatListCreateInfo *format_list_in =
         vk_find_struct_const(pCreateInfo->pNext,
                              IMAGE_FORMAT_LIST_CREATE_INFO);

      /* Valid usage requires the list to be present and non-empty. */
      assume(format_list_in && format_list_in->viewFormatCount > 0);

      const uint32_t view_format_count = format_list_in->viewFormatCount;
      VkFormat *view_formats =
         vk_alloc(&chain->alloc, sizeof(VkFormat) * view_format_count,
                  8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!view_formats)
         goto err_oom;

      /* The swapchain's own format must be in the view-format list. */
      ASSERTED bool format_found = false;
      for (uint32_t i = 0; i < format_list_in->viewFormatCount; i++) {
         if (pCreateInfo->imageFormat == format_list_in->pViewFormats[i])
            format_found = true;
         view_formats[i] = format_list_in->pViewFormats[i];
      }
      assert(format_found);

      info->format_list = (VkImageFormatListCreateInfo) {
         .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO,
         .viewFormatCount = view_format_count,
         .pViewFormats = view_formats,
      };
      __vk_append_struct(&info->create, &info->format_list);
   }

   return VK_SUCCESS;

err_oom:
   wsi_destroy_image_info(chain, info);
   return VK_ERROR_OUT_OF_HOST_MEMORY;
}
652 
653 void
wsi_destroy_image_info(const struct wsi_swapchain * chain,struct wsi_image_info * info)654 wsi_destroy_image_info(const struct wsi_swapchain *chain,
655                        struct wsi_image_info *info)
656 {
657    if (info->create.pQueueFamilyIndices != NULL) {
658       vk_free(&chain->alloc, (void *)info->create.pQueueFamilyIndices);
659       info->create.pQueueFamilyIndices = NULL;
660    }
661    if (info->format_list.pViewFormats != NULL) {
662       vk_free(&chain->alloc, (void *)info->format_list.pViewFormats);
663       info->format_list.pViewFormats = NULL;
664    }
665    if (info->drm_mod_list.pDrmFormatModifiers != NULL) {
666       vk_free(&chain->alloc, (void *)info->drm_mod_list.pDrmFormatModifiers);
667       info->drm_mod_list.pDrmFormatModifiers = NULL;
668    }
669    if (info->modifier_props != NULL) {
670       vk_free(&chain->alloc, info->modifier_props);
671       info->modifier_props = NULL;
672    }
673 }
674 
/*
 * Create one presentable image from a prepared wsi_image_info:
 * create the VkImage, allocate/import its memory through the
 * configuration's create_mem hook, bind them, then run the optional
 * finish_create hook (e.g. for layout/modifier queries).
 *
 * On failure every partially-created resource is released via
 * wsi_destroy_image(); *image is zeroed first so that cleanup only
 * sees what was actually created.
 */
VkResult
wsi_create_image(const struct wsi_swapchain *chain,
                 const struct wsi_image_info *info,
                 struct wsi_image *image)
{
   const struct wsi_device *wsi = chain->wsi;
   VkResult result;

   memset(image, 0, sizeof(*image));

#ifndef _WIN32
   /* -1 marks "no dma-buf exported"; 0 is a valid fd. */
   image->dma_buf_fd = -1;
#endif

   result = wsi->CreateImage(chain->device, &info->create,
                             &chain->alloc, &image->image);
   if (result != VK_SUCCESS)
      goto fail;

   result = info->create_mem(chain, info, image);
   if (result != VK_SUCCESS)
      goto fail;

   result = wsi->BindImageMemory(chain->device, image->image,
                                 image->memory, 0);
   if (result != VK_SUCCESS)
      goto fail;

   if (info->finish_create) {
      result = info->finish_create(chain, info, image);
      if (result != VK_SUCCESS)
         goto fail;
   }

   return VK_SUCCESS;

fail:
   wsi_destroy_image(chain, image);
   return result;
}
715 
/*
 * Release everything owned by one presentable image: the exported
 * dma-buf fd, the CPU mapping, the per-queue-family blit command
 * buffers, and the image/memory plus any blit image/buffer/memory.
 *
 * Safe on partially-created images: unset Vulkan handles are
 * VK_NULL_HANDLE (a no-op to destroy) thanks to the memset in
 * wsi_create_image().
 */
void
wsi_destroy_image(const struct wsi_swapchain *chain,
                  struct wsi_image *image)
{
   const struct wsi_device *wsi = chain->wsi;

#ifndef _WIN32
   if (image->dma_buf_fd >= 0)
      close(image->dma_buf_fd);
#endif

   /* The mapping lives on the blit buffer's memory when one exists,
    * otherwise on the image memory itself. */
   if (image->cpu_map != NULL) {
      wsi->UnmapMemory(chain->device, image->blit.buffer != VK_NULL_HANDLE ?
                                      image->blit.memory : image->memory);
   }

   if (image->blit.cmd_buffers) {
      /* Must mirror the pool-count logic in wsi_swapchain_init(). */
      int cmd_buffer_count =
         chain->blit.queue != VK_NULL_HANDLE ? 1 : wsi->queue_family_count;

      for (uint32_t i = 0; i < cmd_buffer_count; i++) {
         wsi->FreeCommandBuffers(chain->device, chain->cmd_pools[i],
                                 1, &image->blit.cmd_buffers[i]);
      }
      vk_free(&chain->alloc, image->blit.cmd_buffers);
   }

   wsi->FreeMemory(chain->device, image->memory, &chain->alloc);
   wsi->DestroyImage(chain->device, image->image, &chain->alloc);
   wsi->DestroyImage(chain->device, image->blit.image, &chain->alloc);
   wsi->FreeMemory(chain->device, image->blit.memory, &chain->alloc);
   wsi->DestroyBuffer(chain->device, image->blit.buffer, &chain->alloc);
}
749 
/*
 * Implementation of vkGetPhysicalDeviceSurfaceSupportKHR: delegate the
 * query to the surface's platform backend.
 */
VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice,
                                       uint32_t queueFamilyIndex,
                                       VkSurfaceKHR _surface,
                                       VkBool32 *pSupported)
{
   VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
   struct wsi_device *wsi_device = device->wsi_device;
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];

   return iface->get_support(surface, wsi_device,
                             queueFamilyIndex, pSupported);
}
764 
765 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice,VkSurfaceKHR _surface,VkSurfaceCapabilitiesKHR * pSurfaceCapabilities)766 wsi_GetPhysicalDeviceSurfaceCapabilitiesKHR(
767    VkPhysicalDevice physicalDevice,
768    VkSurfaceKHR _surface,
769    VkSurfaceCapabilitiesKHR *pSurfaceCapabilities)
770 {
771    VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
772    ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
773    struct wsi_device *wsi_device = device->wsi_device;
774    struct wsi_interface *iface = wsi_device->wsi[surface->platform];
775 
776    VkSurfaceCapabilities2KHR caps2 = {
777       .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
778    };
779 
780    VkResult result = iface->get_capabilities2(surface, wsi_device, NULL, &caps2);
781 
782    if (result == VK_SUCCESS)
783       *pSurfaceCapabilities = caps2.surfaceCapabilities;
784 
785    return result;
786 }
787 
/*
 * Implementation of vkGetPhysicalDeviceSurfaceCapabilities2KHR:
 * delegate to the surface's platform backend, forwarding the caller's
 * input pNext chain so backends can honor extension queries.
 */
VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceCapabilities2KHR(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
   VkSurfaceCapabilities2KHR *pSurfaceCapabilities)
{
   VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
   ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
   struct wsi_device *wsi_device = device->wsi_device;
   struct wsi_interface *iface = wsi_device->wsi[surface->platform];

   return iface->get_capabilities2(surface, wsi_device, pSurfaceInfo->pNext,
                                   pSurfaceCapabilities);
}
802 
803 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice,VkSurfaceKHR _surface,VkSurfaceCapabilities2EXT * pSurfaceCapabilities)804 wsi_GetPhysicalDeviceSurfaceCapabilities2EXT(
805    VkPhysicalDevice physicalDevice,
806    VkSurfaceKHR _surface,
807    VkSurfaceCapabilities2EXT *pSurfaceCapabilities)
808 {
809    VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
810    ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
811    struct wsi_device *wsi_device = device->wsi_device;
812    struct wsi_interface *iface = wsi_device->wsi[surface->platform];
813 
814    assert(pSurfaceCapabilities->sType ==
815           VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT);
816 
817    struct wsi_surface_supported_counters counters = {
818       .sType = VK_STRUCTURE_TYPE_WSI_SURFACE_SUPPORTED_COUNTERS_MESA,
819       .pNext = pSurfaceCapabilities->pNext,
820       .supported_surface_counters = 0,
821    };
822 
823    VkSurfaceCapabilities2KHR caps2 = {
824       .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
825       .pNext = &counters,
826    };
827 
828    VkResult result = iface->get_capabilities2(surface, wsi_device, NULL, &caps2);
829 
830    if (result == VK_SUCCESS) {
831       VkSurfaceCapabilities2EXT *ext_caps = pSurfaceCapabilities;
832       VkSurfaceCapabilitiesKHR khr_caps = caps2.surfaceCapabilities;
833 
834       ext_caps->minImageCount = khr_caps.minImageCount;
835       ext_caps->maxImageCount = khr_caps.maxImageCount;
836       ext_caps->currentExtent = khr_caps.currentExtent;
837       ext_caps->minImageExtent = khr_caps.minImageExtent;
838       ext_caps->maxImageExtent = khr_caps.maxImageExtent;
839       ext_caps->maxImageArrayLayers = khr_caps.maxImageArrayLayers;
840       ext_caps->supportedTransforms = khr_caps.supportedTransforms;
841       ext_caps->currentTransform = khr_caps.currentTransform;
842       ext_caps->supportedCompositeAlpha = khr_caps.supportedCompositeAlpha;
843       ext_caps->supportedUsageFlags = khr_caps.supportedUsageFlags;
844       ext_caps->supportedSurfaceCounters = counters.supported_surface_counters;
845    }
846 
847    return result;
848 }
849 
850 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice,VkSurfaceKHR _surface,uint32_t * pSurfaceFormatCount,VkSurfaceFormatKHR * pSurfaceFormats)851 wsi_GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice,
852                                        VkSurfaceKHR _surface,
853                                        uint32_t *pSurfaceFormatCount,
854                                        VkSurfaceFormatKHR *pSurfaceFormats)
855 {
856    VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
857    ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
858    struct wsi_device *wsi_device = device->wsi_device;
859    struct wsi_interface *iface = wsi_device->wsi[surface->platform];
860 
861    return iface->get_formats(surface, wsi_device,
862                              pSurfaceFormatCount, pSurfaceFormats);
863 }
864 
865 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,const VkPhysicalDeviceSurfaceInfo2KHR * pSurfaceInfo,uint32_t * pSurfaceFormatCount,VkSurfaceFormat2KHR * pSurfaceFormats)866 wsi_GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
867                                         const VkPhysicalDeviceSurfaceInfo2KHR * pSurfaceInfo,
868                                         uint32_t *pSurfaceFormatCount,
869                                         VkSurfaceFormat2KHR *pSurfaceFormats)
870 {
871    VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
872    ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pSurfaceInfo->surface);
873    struct wsi_device *wsi_device = device->wsi_device;
874    struct wsi_interface *iface = wsi_device->wsi[surface->platform];
875 
876    return iface->get_formats2(surface, wsi_device, pSurfaceInfo->pNext,
877                               pSurfaceFormatCount, pSurfaceFormats);
878 }
879 
880 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice,VkSurfaceKHR _surface,uint32_t * pPresentModeCount,VkPresentModeKHR * pPresentModes)881 wsi_GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice,
882                                             VkSurfaceKHR _surface,
883                                             uint32_t *pPresentModeCount,
884                                             VkPresentModeKHR *pPresentModes)
885 {
886    VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
887    ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
888    struct wsi_device *wsi_device = device->wsi_device;
889    struct wsi_interface *iface = wsi_device->wsi[surface->platform];
890 
891    return iface->get_present_modes(surface, wsi_device, pPresentModeCount,
892                                    pPresentModes);
893 }
894 
895 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice,VkSurfaceKHR _surface,uint32_t * pRectCount,VkRect2D * pRects)896 wsi_GetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice,
897                                           VkSurfaceKHR _surface,
898                                           uint32_t *pRectCount,
899                                           VkRect2D *pRects)
900 {
901    VK_FROM_HANDLE(vk_physical_device, device, physicalDevice);
902    ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, _surface);
903    struct wsi_device *wsi_device = device->wsi_device;
904    struct wsi_interface *iface = wsi_device->wsi[surface->platform];
905 
906    return iface->get_present_rectangles(surface, wsi_device,
907                                         pRectCount, pRects);
908 }
909 
910 VKAPI_ATTR VkResult VKAPI_CALL
wsi_CreateSwapchainKHR(VkDevice _device,const VkSwapchainCreateInfoKHR * pCreateInfo,const VkAllocationCallbacks * pAllocator,VkSwapchainKHR * pSwapchain)911 wsi_CreateSwapchainKHR(VkDevice _device,
912                        const VkSwapchainCreateInfoKHR *pCreateInfo,
913                        const VkAllocationCallbacks *pAllocator,
914                        VkSwapchainKHR *pSwapchain)
915 {
916    MESA_TRACE_FUNC();
917    VK_FROM_HANDLE(vk_device, device, _device);
918    ICD_FROM_HANDLE(VkIcdSurfaceBase, surface, pCreateInfo->surface);
919    struct wsi_device *wsi_device = device->physical->wsi_device;
920    struct wsi_interface *iface = wsi_device->force_headless_swapchain ?
921       wsi_device->wsi[VK_ICD_WSI_PLATFORM_HEADLESS] :
922       wsi_device->wsi[surface->platform];
923    const VkAllocationCallbacks *alloc;
924    struct wsi_swapchain *swapchain;
925 
926    if (pAllocator)
927      alloc = pAllocator;
928    else
929      alloc = &device->alloc;
930 
931    VkSwapchainCreateInfoKHR info = *pCreateInfo;
932 
933    if (wsi_device->force_swapchain_to_currentExtent) {
934       VkSurfaceCapabilities2KHR caps2 = {
935          .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR,
936       };
937       iface->get_capabilities2(surface, wsi_device, NULL, &caps2);
938       info.imageExtent = caps2.surfaceCapabilities.currentExtent;
939    }
940 
941    /* Ignore DEFERRED_MEMORY_ALLOCATION_BIT. Would require deep plumbing to be able to take advantage of it.
942     * bool deferred_allocation = pCreateInfo->flags & VK_SWAPCHAIN_CREATE_DEFERRED_MEMORY_ALLOCATION_BIT_EXT;
943     */
944 
945    VkResult result = iface->create_swapchain(surface, _device, wsi_device,
946                                              &info, alloc,
947                                              &swapchain);
948    if (result != VK_SUCCESS)
949       return result;
950 
951    swapchain->fences = vk_zalloc(alloc,
952                                  sizeof (*swapchain->fences) * swapchain->image_count,
953                                  sizeof (*swapchain->fences),
954                                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
955    if (!swapchain->fences) {
956       swapchain->destroy(swapchain, alloc);
957       return VK_ERROR_OUT_OF_HOST_MEMORY;
958    }
959 
960    if (wsi_device->khr_present_wait) {
961       const VkSemaphoreTypeCreateInfo type_info = {
962          .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
963          .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE,
964       };
965 
966       const VkSemaphoreCreateInfo sem_info = {
967          .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
968          .pNext = &type_info,
969          .flags = 0,
970       };
971 
972       /* We assume here that a driver exposing present_wait also exposes VK_KHR_timeline_semaphore. */
973       result = wsi_device->CreateSemaphore(_device, &sem_info, alloc, &swapchain->present_id_timeline);
974       if (result != VK_SUCCESS) {
975          swapchain->destroy(swapchain, alloc);
976          return VK_ERROR_OUT_OF_HOST_MEMORY;
977       }
978    }
979 
980    if (swapchain->blit.queue != VK_NULL_HANDLE) {
981       swapchain->blit.semaphores = vk_zalloc(alloc,
982                                          sizeof (*swapchain->blit.semaphores) * swapchain->image_count,
983                                          sizeof (*swapchain->blit.semaphores),
984                                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
985       if (!swapchain->blit.semaphores) {
986          wsi_device->DestroySemaphore(_device, swapchain->present_id_timeline, alloc);
987          swapchain->destroy(swapchain, alloc);
988          return VK_ERROR_OUT_OF_HOST_MEMORY;
989       }
990    }
991 
992    *pSwapchain = wsi_swapchain_to_handle(swapchain);
993 
994    return VK_SUCCESS;
995 }
996 
997 VKAPI_ATTR void VKAPI_CALL
wsi_DestroySwapchainKHR(VkDevice _device,VkSwapchainKHR _swapchain,const VkAllocationCallbacks * pAllocator)998 wsi_DestroySwapchainKHR(VkDevice _device,
999                         VkSwapchainKHR _swapchain,
1000                         const VkAllocationCallbacks *pAllocator)
1001 {
1002    MESA_TRACE_FUNC();
1003    VK_FROM_HANDLE(vk_device, device, _device);
1004    VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
1005    const VkAllocationCallbacks *alloc;
1006 
1007    if (!swapchain)
1008       return;
1009 
1010    if (pAllocator)
1011      alloc = pAllocator;
1012    else
1013      alloc = &device->alloc;
1014 
1015    swapchain->destroy(swapchain, alloc);
1016 }
1017 
1018 VKAPI_ATTR VkResult VKAPI_CALL
wsi_ReleaseSwapchainImagesEXT(VkDevice _device,const VkReleaseSwapchainImagesInfoEXT * pReleaseInfo)1019 wsi_ReleaseSwapchainImagesEXT(VkDevice _device,
1020                               const VkReleaseSwapchainImagesInfoEXT *pReleaseInfo)
1021 {
1022    VK_FROM_HANDLE(wsi_swapchain, swapchain, pReleaseInfo->swapchain);
1023    VkResult result = swapchain->release_images(swapchain,
1024                                                pReleaseInfo->imageIndexCount,
1025                                                pReleaseInfo->pImageIndices);
1026 
1027    if (result != VK_SUCCESS)
1028       return result;
1029 
1030    if (swapchain->wsi->set_memory_ownership) {
1031       for (uint32_t i = 0; i < pReleaseInfo->imageIndexCount; i++) {
1032          uint32_t image_index = pReleaseInfo->pImageIndices[i];
1033          VkDeviceMemory mem = swapchain->get_wsi_image(swapchain, image_index)->memory;
1034          swapchain->wsi->set_memory_ownership(swapchain->device, mem, false);
1035       }
1036    }
1037 
1038    return VK_SUCCESS;
1039 }
1040 
1041 VkResult
wsi_common_get_images(VkSwapchainKHR _swapchain,uint32_t * pSwapchainImageCount,VkImage * pSwapchainImages)1042 wsi_common_get_images(VkSwapchainKHR _swapchain,
1043                       uint32_t *pSwapchainImageCount,
1044                       VkImage *pSwapchainImages)
1045 {
1046    VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
1047    VK_OUTARRAY_MAKE_TYPED(VkImage, images, pSwapchainImages, pSwapchainImageCount);
1048 
1049    for (uint32_t i = 0; i < swapchain->image_count; i++) {
1050       vk_outarray_append_typed(VkImage, &images, image) {
1051          *image = swapchain->get_wsi_image(swapchain, i)->image;
1052       }
1053    }
1054 
1055    return vk_outarray_status(&images);
1056 }
1057 
1058 VkImage
wsi_common_get_image(VkSwapchainKHR _swapchain,uint32_t index)1059 wsi_common_get_image(VkSwapchainKHR _swapchain, uint32_t index)
1060 {
1061    VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
1062    assert(index < swapchain->image_count);
1063    return swapchain->get_wsi_image(swapchain, index)->image;
1064 }
1065 
1066 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetSwapchainImagesKHR(VkDevice device,VkSwapchainKHR swapchain,uint32_t * pSwapchainImageCount,VkImage * pSwapchainImages)1067 wsi_GetSwapchainImagesKHR(VkDevice device,
1068                           VkSwapchainKHR swapchain,
1069                           uint32_t *pSwapchainImageCount,
1070                           VkImage *pSwapchainImages)
1071 {
1072    MESA_TRACE_FUNC();
1073    return wsi_common_get_images(swapchain,
1074                                 pSwapchainImageCount,
1075                                 pSwapchainImages);
1076 }
1077 
1078 VKAPI_ATTR VkResult VKAPI_CALL
wsi_AcquireNextImageKHR(VkDevice _device,VkSwapchainKHR swapchain,uint64_t timeout,VkSemaphore semaphore,VkFence fence,uint32_t * pImageIndex)1079 wsi_AcquireNextImageKHR(VkDevice _device,
1080                         VkSwapchainKHR swapchain,
1081                         uint64_t timeout,
1082                         VkSemaphore semaphore,
1083                         VkFence fence,
1084                         uint32_t *pImageIndex)
1085 {
1086    MESA_TRACE_FUNC();
1087    VK_FROM_HANDLE(vk_device, device, _device);
1088 
1089    const VkAcquireNextImageInfoKHR acquire_info = {
1090       .sType = VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR,
1091       .swapchain = swapchain,
1092       .timeout = timeout,
1093       .semaphore = semaphore,
1094       .fence = fence,
1095       .deviceMask = 0,
1096    };
1097 
1098    return device->dispatch_table.AcquireNextImage2KHR(_device, &acquire_info,
1099                                                       pImageIndex);
1100 }
1101 
1102 static VkResult
wsi_signal_semaphore_for_image(struct vk_device * device,const struct wsi_swapchain * chain,const struct wsi_image * image,VkSemaphore _semaphore)1103 wsi_signal_semaphore_for_image(struct vk_device *device,
1104                                const struct wsi_swapchain *chain,
1105                                const struct wsi_image *image,
1106                                VkSemaphore _semaphore)
1107 {
1108    if (device->physical->supported_sync_types == NULL)
1109       return VK_SUCCESS;
1110 
1111    VK_FROM_HANDLE(vk_semaphore, semaphore, _semaphore);
1112 
1113    vk_semaphore_reset_temporary(device, semaphore);
1114 
1115 #ifdef HAVE_LIBDRM
1116    VkResult result = wsi_create_sync_for_dma_buf_wait(chain, image,
1117                                                       VK_SYNC_FEATURE_GPU_WAIT,
1118                                                       &semaphore->temporary);
1119    if (result != VK_ERROR_FEATURE_NOT_PRESENT)
1120       return result;
1121 #endif
1122 
1123    if (chain->wsi->signal_semaphore_with_memory) {
1124       return device->create_sync_for_memory(device, image->memory,
1125                                             false /* signal_memory */,
1126                                             &semaphore->temporary);
1127    } else {
1128       return vk_sync_create(device, &vk_sync_dummy_type,
1129                             0 /* flags */, 0 /* initial_value */,
1130                             &semaphore->temporary);
1131    }
1132 }
1133 
1134 static VkResult
wsi_signal_fence_for_image(struct vk_device * device,const struct wsi_swapchain * chain,const struct wsi_image * image,VkFence _fence)1135 wsi_signal_fence_for_image(struct vk_device *device,
1136                            const struct wsi_swapchain *chain,
1137                            const struct wsi_image *image,
1138                            VkFence _fence)
1139 {
1140    if (device->physical->supported_sync_types == NULL)
1141       return VK_SUCCESS;
1142 
1143    VK_FROM_HANDLE(vk_fence, fence, _fence);
1144 
1145    vk_fence_reset_temporary(device, fence);
1146 
1147 #ifdef HAVE_LIBDRM
1148    VkResult result = wsi_create_sync_for_dma_buf_wait(chain, image,
1149                                                       VK_SYNC_FEATURE_CPU_WAIT,
1150                                                       &fence->temporary);
1151    if (result != VK_ERROR_FEATURE_NOT_PRESENT)
1152       return result;
1153 #endif
1154 
1155    if (chain->wsi->signal_fence_with_memory) {
1156       return device->create_sync_for_memory(device, image->memory,
1157                                             false /* signal_memory */,
1158                                             &fence->temporary);
1159    } else {
1160       return vk_sync_create(device, &vk_sync_dummy_type,
1161                             0 /* flags */, 0 /* initial_value */,
1162                             &fence->temporary);
1163    }
1164 }
1165 
1166 VkResult
wsi_common_acquire_next_image2(const struct wsi_device * wsi,VkDevice _device,const VkAcquireNextImageInfoKHR * pAcquireInfo,uint32_t * pImageIndex)1167 wsi_common_acquire_next_image2(const struct wsi_device *wsi,
1168                                VkDevice _device,
1169                                const VkAcquireNextImageInfoKHR *pAcquireInfo,
1170                                uint32_t *pImageIndex)
1171 {
1172    VK_FROM_HANDLE(wsi_swapchain, swapchain, pAcquireInfo->swapchain);
1173    VK_FROM_HANDLE(vk_device, device, _device);
1174 
1175    VkResult result = swapchain->acquire_next_image(swapchain, pAcquireInfo,
1176                                                    pImageIndex);
1177    if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
1178       return result;
1179    struct wsi_image *image =
1180       swapchain->get_wsi_image(swapchain, *pImageIndex);
1181 
1182    if (pAcquireInfo->semaphore != VK_NULL_HANDLE) {
1183       VkResult signal_result =
1184          wsi_signal_semaphore_for_image(device, swapchain, image,
1185                                         pAcquireInfo->semaphore);
1186       if (signal_result != VK_SUCCESS)
1187          return signal_result;
1188    }
1189 
1190    if (pAcquireInfo->fence != VK_NULL_HANDLE) {
1191       VkResult signal_result =
1192          wsi_signal_fence_for_image(device, swapchain, image,
1193                                     pAcquireInfo->fence);
1194       if (signal_result != VK_SUCCESS)
1195          return signal_result;
1196    }
1197 
1198    if (wsi->set_memory_ownership)
1199       wsi->set_memory_ownership(swapchain->device, image->memory, true);
1200 
1201    return result;
1202 }
1203 
1204 VKAPI_ATTR VkResult VKAPI_CALL
wsi_AcquireNextImage2KHR(VkDevice _device,const VkAcquireNextImageInfoKHR * pAcquireInfo,uint32_t * pImageIndex)1205 wsi_AcquireNextImage2KHR(VkDevice _device,
1206                          const VkAcquireNextImageInfoKHR *pAcquireInfo,
1207                          uint32_t *pImageIndex)
1208 {
1209    MESA_TRACE_FUNC();
1210    VK_FROM_HANDLE(vk_device, device, _device);
1211 
1212    return wsi_common_acquire_next_image2(device->physical->wsi_device,
1213                                          _device, pAcquireInfo, pImageIndex);
1214 }
1215 
wsi_signal_present_id_timeline(struct wsi_swapchain * swapchain,VkQueue queue,uint64_t present_id,VkFence present_fence)1216 static VkResult wsi_signal_present_id_timeline(struct wsi_swapchain *swapchain,
1217                                                VkQueue queue, uint64_t present_id,
1218                                                VkFence present_fence)
1219 {
1220    assert(swapchain->present_id_timeline || present_fence);
1221 
1222    const VkTimelineSemaphoreSubmitInfo timeline_info = {
1223       .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,
1224       .pSignalSemaphoreValues = &present_id,
1225       .signalSemaphoreValueCount = 1,
1226    };
1227 
1228    const VkSubmitInfo submit_info = {
1229       .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
1230       .pNext = &timeline_info,
1231       .signalSemaphoreCount = 1,
1232       .pSignalSemaphores = &swapchain->present_id_timeline,
1233    };
1234 
1235    uint32_t submit_count = present_id ? 1 : 0;
1236    return swapchain->wsi->QueueSubmit(queue, submit_count, &submit_info, present_fence);
1237 }
1238 
1239 static VkResult
handle_trace(VkQueue queue,struct vk_device * device)1240 handle_trace(VkQueue queue, struct vk_device *device)
1241 {
1242    struct vk_instance *instance = device->physical->instance;
1243    if (!instance->trace_mode)
1244       return VK_SUCCESS;
1245 
1246    simple_mtx_lock(&device->trace_mtx);
1247 
1248    bool frame_trigger = device->current_frame == instance->trace_frame;
1249    if (device->current_frame <= instance->trace_frame)
1250       device->current_frame++;
1251 
1252    bool file_trigger = false;
1253 #ifndef _WIN32
1254    if (instance->trace_trigger_file && access(instance->trace_trigger_file, W_OK) == 0) {
1255       if (unlink(instance->trace_trigger_file) == 0) {
1256          file_trigger = true;
1257       } else {
1258          /* Do not enable tracing if we cannot remove the file,
1259           * because by then we'll trace every frame ... */
1260          fprintf(stderr, "Could not remove trace trigger file, ignoring\n");
1261       }
1262    }
1263 #endif
1264 
1265    VkResult result = VK_SUCCESS;
1266    if (frame_trigger || file_trigger || device->trace_hotkey_trigger)
1267       result = device->capture_trace(queue);
1268 
1269    device->trace_hotkey_trigger = false;
1270 
1271    simple_mtx_unlock(&device->trace_mtx);
1272 
1273    return result;
1274 }
1275 
1276 VkResult
wsi_common_queue_present(const struct wsi_device * wsi,VkDevice device,VkQueue queue,int queue_family_index,const VkPresentInfoKHR * pPresentInfo)1277 wsi_common_queue_present(const struct wsi_device *wsi,
1278                          VkDevice device,
1279                          VkQueue queue,
1280                          int queue_family_index,
1281                          const VkPresentInfoKHR *pPresentInfo)
1282 {
1283    VkResult final_result = handle_trace(queue, vk_device_from_handle(device));
1284 
1285    STACK_ARRAY(VkPipelineStageFlags, stage_flags,
1286                MAX2(1, pPresentInfo->waitSemaphoreCount));
1287    for (uint32_t s = 0; s < MAX2(1, pPresentInfo->waitSemaphoreCount); s++)
1288       stage_flags[s] = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
1289 
1290    const VkPresentRegionsKHR *regions =
1291       vk_find_struct_const(pPresentInfo->pNext, PRESENT_REGIONS_KHR);
1292    const VkPresentIdKHR *present_ids =
1293       vk_find_struct_const(pPresentInfo->pNext, PRESENT_ID_KHR);
1294    const VkSwapchainPresentFenceInfoEXT *present_fence_info =
1295       vk_find_struct_const(pPresentInfo->pNext, SWAPCHAIN_PRESENT_FENCE_INFO_EXT);
1296    const VkSwapchainPresentModeInfoEXT *present_mode_info =
1297       vk_find_struct_const(pPresentInfo->pNext, SWAPCHAIN_PRESENT_MODE_INFO_EXT);
1298 
1299    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
1300       VK_FROM_HANDLE(wsi_swapchain, swapchain, pPresentInfo->pSwapchains[i]);
1301       uint32_t image_index = pPresentInfo->pImageIndices[i];
1302       VkResult result;
1303 
1304       /* Update the present mode for this present and any subsequent present. */
1305       if (present_mode_info && present_mode_info->pPresentModes && swapchain->set_present_mode)
1306          swapchain->set_present_mode(swapchain, present_mode_info->pPresentModes[i]);
1307 
1308       if (swapchain->fences[image_index] == VK_NULL_HANDLE) {
1309          const VkFenceCreateInfo fence_info = {
1310             .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
1311             .pNext = NULL,
1312             .flags = VK_FENCE_CREATE_SIGNALED_BIT,
1313          };
1314          result = wsi->CreateFence(device, &fence_info,
1315                                    &swapchain->alloc,
1316                                    &swapchain->fences[image_index]);
1317          if (result != VK_SUCCESS)
1318             goto fail_present;
1319 
1320          if (swapchain->blit.type != WSI_SWAPCHAIN_NO_BLIT &&
1321              swapchain->blit.queue != VK_NULL_HANDLE) {
1322             const VkSemaphoreCreateInfo sem_info = {
1323                .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
1324                .pNext = NULL,
1325                .flags = 0,
1326             };
1327             result = wsi->CreateSemaphore(device, &sem_info,
1328                                           &swapchain->alloc,
1329                                           &swapchain->blit.semaphores[image_index]);
1330             if (result != VK_SUCCESS)
1331                goto fail_present;
1332          }
1333       } else {
1334          MESA_TRACE_SCOPE("throttle");
1335          result =
1336             wsi->WaitForFences(device, 1, &swapchain->fences[image_index],
1337                                true, ~0ull);
1338          if (result != VK_SUCCESS)
1339             goto fail_present;
1340       }
1341 
1342       result = wsi->ResetFences(device, 1, &swapchain->fences[image_index]);
1343       if (result != VK_SUCCESS)
1344          goto fail_present;
1345 
1346       VkSubmitInfo submit_info = {
1347          .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
1348       };
1349 
1350       if (i == 0) {
1351          /* We only need/want to wait on semaphores once.  After that, we're
1352           * guaranteed ordering since it all happens on the same queue.
1353           */
1354          submit_info.waitSemaphoreCount = pPresentInfo->waitSemaphoreCount;
1355          submit_info.pWaitSemaphores = pPresentInfo->pWaitSemaphores;
1356          submit_info.pWaitDstStageMask = stage_flags;
1357       }
1358 
1359       struct wsi_image *image =
1360          swapchain->get_wsi_image(swapchain, image_index);
1361 
1362       VkQueue submit_queue = queue;
1363       if (swapchain->blit.type != WSI_SWAPCHAIN_NO_BLIT) {
1364          if (swapchain->blit.queue == VK_NULL_HANDLE) {
1365             submit_info.commandBufferCount = 1;
1366             submit_info.pCommandBuffers =
1367                &image->blit.cmd_buffers[queue_family_index];
1368          } else {
1369             /* If we are using a blit using the driver's private queue, then
1370              * do an empty submit signalling a semaphore, and then submit the
1371              * blit waiting on that.  This ensures proper queue ordering of
1372              * vkQueueSubmit() calls.
1373              */
1374             submit_info.signalSemaphoreCount = 1;
1375             submit_info.pSignalSemaphores =
1376                &swapchain->blit.semaphores[image_index];
1377 
1378             result = wsi->QueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
1379             if (result != VK_SUCCESS)
1380                goto fail_present;
1381 
1382             /* Now prepare the blit submit.  It needs to then wait on the
1383              * semaphore we signaled above.
1384              */
1385             submit_queue = swapchain->blit.queue;
1386             submit_info.waitSemaphoreCount = 1;
1387             submit_info.pWaitSemaphores = submit_info.pSignalSemaphores;
1388             submit_info.signalSemaphoreCount = 0;
1389             submit_info.pSignalSemaphores = NULL;
1390             submit_info.commandBufferCount = 1;
1391             submit_info.pCommandBuffers = &image->blit.cmd_buffers[0];
1392             submit_info.pWaitDstStageMask = stage_flags;
1393          }
1394       }
1395 
1396       VkFence fence = swapchain->fences[image_index];
1397 
1398       bool has_signal_dma_buf = false;
1399 #ifdef HAVE_LIBDRM
1400       result = wsi_prepare_signal_dma_buf_from_semaphore(swapchain, image);
1401       if (result == VK_SUCCESS) {
1402          assert(submit_info.signalSemaphoreCount == 0);
1403          submit_info.signalSemaphoreCount = 1;
1404          submit_info.pSignalSemaphores = &swapchain->dma_buf_semaphore;
1405          has_signal_dma_buf = true;
1406       } else if (result == VK_ERROR_FEATURE_NOT_PRESENT) {
1407          result = VK_SUCCESS;
1408          has_signal_dma_buf = false;
1409       } else {
1410          goto fail_present;
1411       }
1412 #endif
1413 
1414       struct wsi_memory_signal_submit_info mem_signal;
1415       if (!has_signal_dma_buf) {
1416          /* If we don't have dma-buf signaling, signal the memory object by
1417           * chaining wsi_memory_signal_submit_info into VkSubmitInfo.
1418           */
1419          result = VK_SUCCESS;
1420          has_signal_dma_buf = false;
1421          mem_signal = (struct wsi_memory_signal_submit_info) {
1422             .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA,
1423             .memory = image->memory,
1424          };
1425          __vk_append_struct(&submit_info, &mem_signal);
1426       }
1427 
1428       result = wsi->QueueSubmit(submit_queue, 1, &submit_info, fence);
1429       if (result != VK_SUCCESS)
1430          goto fail_present;
1431 
1432 #ifdef HAVE_LIBDRM
1433       if (has_signal_dma_buf) {
1434          result = wsi_signal_dma_buf_from_semaphore(swapchain, image);
1435          if (result != VK_SUCCESS)
1436             goto fail_present;
1437       }
1438 #else
1439       assert(!has_signal_dma_buf);
1440 #endif
1441 
1442       if (wsi->sw)
1443 	      wsi->WaitForFences(device, 1, &swapchain->fences[image_index],
1444 				 true, ~0ull);
1445 
1446       const VkPresentRegionKHR *region = NULL;
1447       if (regions && regions->pRegions)
1448          region = &regions->pRegions[i];
1449 
1450       uint64_t present_id = 0;
1451       if (present_ids && present_ids->pPresentIds)
1452          present_id = present_ids->pPresentIds[i];
1453       VkFence present_fence = VK_NULL_HANDLE;
1454       if (present_fence_info && present_fence_info->pFences)
1455          present_fence = present_fence_info->pFences[i];
1456 
1457       if (present_id || present_fence) {
1458          result = wsi_signal_present_id_timeline(swapchain, queue, present_id, present_fence);
1459          if (result != VK_SUCCESS)
1460             goto fail_present;
1461       }
1462 
1463       result = swapchain->queue_present(swapchain, image_index, present_id, region);
1464       if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
1465          goto fail_present;
1466 
1467       if (wsi->set_memory_ownership) {
1468          VkDeviceMemory mem = swapchain->get_wsi_image(swapchain, image_index)->memory;
1469          wsi->set_memory_ownership(swapchain->device, mem, false);
1470       }
1471 
1472    fail_present:
1473       if (pPresentInfo->pResults != NULL)
1474          pPresentInfo->pResults[i] = result;
1475 
1476       /* Let the final result be our first unsuccessful result */
1477       if (final_result == VK_SUCCESS)
1478          final_result = result;
1479    }
1480 
1481    STACK_ARRAY_FINISH(stage_flags);
1482 
1483    return final_result;
1484 }
1485 
1486 VKAPI_ATTR VkResult VKAPI_CALL
wsi_QueuePresentKHR(VkQueue _queue,const VkPresentInfoKHR * pPresentInfo)1487 wsi_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
1488 {
1489    MESA_TRACE_FUNC();
1490    VK_FROM_HANDLE(vk_queue, queue, _queue);
1491 
1492    return wsi_common_queue_present(queue->base.device->physical->wsi_device,
1493                                    vk_device_to_handle(queue->base.device),
1494                                    _queue,
1495                                    queue->queue_family_index,
1496                                    pPresentInfo);
1497 }
1498 
1499 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetDeviceGroupPresentCapabilitiesKHR(VkDevice device,VkDeviceGroupPresentCapabilitiesKHR * pCapabilities)1500 wsi_GetDeviceGroupPresentCapabilitiesKHR(VkDevice device,
1501                                          VkDeviceGroupPresentCapabilitiesKHR *pCapabilities)
1502 {
1503    memset(pCapabilities->presentMask, 0,
1504           sizeof(pCapabilities->presentMask));
1505    pCapabilities->presentMask[0] = 0x1;
1506    pCapabilities->modes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
1507 
1508    return VK_SUCCESS;
1509 }
1510 
1511 VKAPI_ATTR VkResult VKAPI_CALL
wsi_GetDeviceGroupSurfacePresentModesKHR(VkDevice device,VkSurfaceKHR surface,VkDeviceGroupPresentModeFlagsKHR * pModes)1512 wsi_GetDeviceGroupSurfacePresentModesKHR(VkDevice device,
1513                                          VkSurfaceKHR surface,
1514                                          VkDeviceGroupPresentModeFlagsKHR *pModes)
1515 {
1516    *pModes = VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR;
1517 
1518    return VK_SUCCESS;
1519 }
1520 
1521 bool
wsi_common_vk_instance_supports_present_wait(const struct vk_instance * instance)1522 wsi_common_vk_instance_supports_present_wait(const struct vk_instance *instance)
1523 {
1524    /* We can only expose KHR_present_wait and KHR_present_id
1525     * if we are guaranteed support on all potential VkSurfaceKHR objects. */
1526    if (instance->enabled_extensions.KHR_wayland_surface ||
1527          instance->enabled_extensions.KHR_win32_surface ||
1528          instance->enabled_extensions.KHR_android_surface) {
1529       return false;
1530    }
1531 
1532    return true;
1533 }
1534 
1535 VkResult
wsi_common_create_swapchain_image(const struct wsi_device * wsi,const VkImageCreateInfo * pCreateInfo,VkSwapchainKHR _swapchain,VkImage * pImage)1536 wsi_common_create_swapchain_image(const struct wsi_device *wsi,
1537                                   const VkImageCreateInfo *pCreateInfo,
1538                                   VkSwapchainKHR _swapchain,
1539                                   VkImage *pImage)
1540 {
1541    VK_FROM_HANDLE(wsi_swapchain, chain, _swapchain);
1542 
1543 #ifndef NDEBUG
1544    const VkImageCreateInfo *swcInfo = &chain->image_info.create;
1545    assert(pCreateInfo->flags == 0);
1546    assert(pCreateInfo->imageType == swcInfo->imageType);
1547    assert(pCreateInfo->format == swcInfo->format);
1548    assert(pCreateInfo->extent.width == swcInfo->extent.width);
1549    assert(pCreateInfo->extent.height == swcInfo->extent.height);
1550    assert(pCreateInfo->extent.depth == swcInfo->extent.depth);
1551    assert(pCreateInfo->mipLevels == swcInfo->mipLevels);
1552    assert(pCreateInfo->arrayLayers == swcInfo->arrayLayers);
1553    assert(pCreateInfo->samples == swcInfo->samples);
1554    assert(pCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL);
1555    assert(!(pCreateInfo->usage & ~swcInfo->usage));
1556 
1557    vk_foreach_struct_const(ext, pCreateInfo->pNext) {
1558       switch (ext->sType) {
1559       case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO: {
1560          const VkImageFormatListCreateInfo *iflci =
1561             (const VkImageFormatListCreateInfo *)ext;
1562          const VkImageFormatListCreateInfo *swc_iflci =
1563             &chain->image_info.format_list;
1564 
1565          for (uint32_t i = 0; i < iflci->viewFormatCount; i++) {
1566             bool found = false;
1567             for (uint32_t j = 0; j < swc_iflci->viewFormatCount; j++) {
1568                if (iflci->pViewFormats[i] == swc_iflci->pViewFormats[j]) {
1569                   found = true;
1570                   break;
1571                }
1572             }
1573             assert(found);
1574          }
1575          break;
1576       }
1577 
1578       case VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR:
1579          break;
1580 
1581       default:
1582          assert(!"Unsupported image create extension");
1583       }
1584    }
1585 #endif
1586 
1587    return wsi->CreateImage(chain->device, &chain->image_info.create,
1588                            &chain->alloc, pImage);
1589 }
1590 
1591 VkResult
wsi_common_bind_swapchain_image(const struct wsi_device * wsi,VkImage vk_image,VkSwapchainKHR _swapchain,uint32_t image_idx)1592 wsi_common_bind_swapchain_image(const struct wsi_device *wsi,
1593                                 VkImage vk_image,
1594                                 VkSwapchainKHR _swapchain,
1595                                 uint32_t image_idx)
1596 {
1597    VK_FROM_HANDLE(wsi_swapchain, chain, _swapchain);
1598    struct wsi_image *image = chain->get_wsi_image(chain, image_idx);
1599 
1600    return wsi->BindImageMemory(chain->device, vk_image, image->memory, 0);
1601 }
1602 
1603 VkResult
wsi_swapchain_wait_for_present_semaphore(const struct wsi_swapchain * chain,uint64_t present_id,uint64_t timeout)1604 wsi_swapchain_wait_for_present_semaphore(const struct wsi_swapchain *chain,
1605                                          uint64_t present_id, uint64_t timeout)
1606 {
1607    assert(chain->present_id_timeline);
1608    const VkSemaphoreWaitInfo wait_info = {
1609       .sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,
1610       .semaphoreCount = 1,
1611       .pSemaphores = &chain->present_id_timeline,
1612       .pValues = &present_id,
1613    };
1614 
1615    return chain->wsi->WaitSemaphoresKHR(chain->device, &wait_info, timeout);
1616 }
1617 
1618 uint32_t
wsi_select_memory_type(const struct wsi_device * wsi,VkMemoryPropertyFlags req_props,VkMemoryPropertyFlags deny_props,uint32_t type_bits)1619 wsi_select_memory_type(const struct wsi_device *wsi,
1620                        VkMemoryPropertyFlags req_props,
1621                        VkMemoryPropertyFlags deny_props,
1622                        uint32_t type_bits)
1623 {
1624    assert(type_bits != 0);
1625 
1626    VkMemoryPropertyFlags common_props = ~0;
1627    u_foreach_bit(t, type_bits) {
1628       const VkMemoryType type = wsi->memory_props.memoryTypes[t];
1629 
1630       common_props &= type.propertyFlags;
1631 
1632       if (deny_props & type.propertyFlags)
1633          continue;
1634 
1635       if (!(req_props & ~type.propertyFlags))
1636          return t;
1637    }
1638 
1639    if ((deny_props & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) &&
1640        (common_props & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) {
1641       /* If they asked for non-device-local and all the types are device-local
1642        * (this is commonly true for UMA platforms), try again without denying
1643        * device-local types
1644        */
1645       deny_props &= ~VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
1646       return wsi_select_memory_type(wsi, req_props, deny_props, type_bits);
1647    }
1648 
1649    unreachable("No memory type found");
1650 }
1651 
1652 uint32_t
wsi_select_device_memory_type(const struct wsi_device * wsi,uint32_t type_bits)1653 wsi_select_device_memory_type(const struct wsi_device *wsi,
1654                               uint32_t type_bits)
1655 {
1656    return wsi_select_memory_type(wsi, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
1657                                  0 /* deny_props */, type_bits);
1658 }
1659 
1660 static uint32_t
wsi_select_host_memory_type(const struct wsi_device * wsi,uint32_t type_bits)1661 wsi_select_host_memory_type(const struct wsi_device *wsi,
1662                             uint32_t type_bits)
1663 {
1664    return wsi_select_memory_type(wsi, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
1665                                  0 /* deny_props */, type_bits);
1666 }
1667 
/* Set up the buffer-blit presentation path for one swapchain image:
 * creates a linear staging buffer, allocates and binds its memory (either
 * exportable via handle_types, or imported from a caller-provided shm
 * host pointer), then allocates dedicated memory for the image itself.
 *
 * On failure the partially-created objects are left for the caller's image
 * teardown path to clean up.  NOTE(review): presumed — destruction is not
 * visible in this function; confirm against the swapchain image teardown.
 */
VkResult
wsi_create_buffer_blit_context(const struct wsi_swapchain *chain,
                               const struct wsi_image_info *info,
                               struct wsi_image *image,
                               VkExternalMemoryHandleTypeFlags handle_types,
                               bool implicit_sync)
{
   assert(chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT);

   const struct wsi_device *wsi = chain->wsi;
   VkResult result;

   /* The staging buffer holds the linear copy of the image that is handed
    * to the window system; it may need to be exportable. */
   const VkExternalMemoryBufferCreateInfo buffer_external_info = {
      .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO,
      .pNext = NULL,
      .handleTypes = handle_types,
   };
   const VkBufferCreateInfo buffer_info = {
      .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
      .pNext = &buffer_external_info,
      .size = info->linear_size,
      .usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT,
      .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
   };
   result = wsi->CreateBuffer(chain->device, &buffer_info,
                              &chain->alloc, &image->blit.buffer);
   if (result != VK_SUCCESS)
      return result;

   VkMemoryRequirements reqs;
   wsi->GetBufferMemoryRequirements(chain->device, image->blit.buffer, &reqs);
   /* linear_size was computed (and aligned) up front; a plain linear buffer
    * must not require more than that. */
   assert(reqs.size <= info->linear_size);

   /* Dedicated allocation for the buffer; implicit_sync is a Mesa-specific
    * hint passed through to the driver's allocator. */
   struct wsi_memory_allocate_info memory_wsi_info = {
      .sType = VK_STRUCTURE_TYPE_WSI_MEMORY_ALLOCATE_INFO_MESA,
      .pNext = NULL,
      .implicit_sync = implicit_sync,
   };
   VkMemoryDedicatedAllocateInfo buf_mem_dedicated_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
      .pNext = &memory_wsi_info,
      .image = VK_NULL_HANDLE,
      .buffer = image->blit.buffer,
   };
   VkMemoryAllocateInfo buf_mem_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &buf_mem_dedicated_info,
      .allocationSize = info->linear_size,
      .memoryTypeIndex =
         info->select_blit_dst_memory_type(wsi, reqs.memoryTypeBits),
   };

   /* Software paths can hand us a shared-memory region to back the buffer
    * instead of exporting driver-allocated memory. */
   void *sw_host_ptr = NULL;
   if (info->alloc_shm)
      sw_host_ptr = info->alloc_shm(image, info->linear_size);

   /* These live on the stack but are chained into buf_mem_info below, so
    * they must stay in scope until AllocateMemory returns. */
   VkExportMemoryAllocateInfo memory_export_info;
   VkImportMemoryHostPointerInfoEXT host_ptr_info;
   if (sw_host_ptr != NULL) {
      /* Import the shm region as host-pointer memory. */
      host_ptr_info = (VkImportMemoryHostPointerInfoEXT) {
         .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT,
         .pHostPointer = sw_host_ptr,
         .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT,
      };
      __vk_append_struct(&buf_mem_info, &host_ptr_info);
   } else if (handle_types != 0) {
      /* Otherwise make the allocation exportable for the window system. */
      memory_export_info = (VkExportMemoryAllocateInfo) {
         .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO,
         .handleTypes = handle_types,
      };
      __vk_append_struct(&buf_mem_info, &memory_export_info);
   }

   result = wsi->AllocateMemory(chain->device, &buf_mem_info,
                                &chain->alloc, &image->blit.memory);
   if (result != VK_SUCCESS)
      return result;

   result = wsi->BindBufferMemory(chain->device, image->blit.buffer,
                                  image->blit.memory, 0);
   if (result != VK_SUCCESS)
      return result;

   /* Now allocate dedicated memory for the (typically tiled) image that
    * the application actually renders to. */
   wsi->GetImageMemoryRequirements(chain->device, image->image, &reqs);

   const VkMemoryDedicatedAllocateInfo memory_dedicated_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
      .pNext = NULL,
      .image = image->image,
      .buffer = VK_NULL_HANDLE,
   };
   const VkMemoryAllocateInfo memory_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &memory_dedicated_info,
      .allocationSize = reqs.size,
      .memoryTypeIndex =
         info->select_image_memory_type(wsi, reqs.memoryTypeBits),
   };

   result = wsi->AllocateMemory(chain->device, &memory_info,
                                &chain->alloc, &image->memory);
   if (result != VK_SUCCESS)
      return result;

   /* The window system sees a single linear plane backed by the buffer. */
   image->num_planes = 1;
   image->sizes[0] = info->linear_size;
   image->row_pitches[0] = info->linear_stride;
   image->offsets[0] = 0;

   return VK_SUCCESS;
}
1779 
1780 VkResult
wsi_finish_create_blit_context(const struct wsi_swapchain * chain,const struct wsi_image_info * info,struct wsi_image * image)1781 wsi_finish_create_blit_context(const struct wsi_swapchain *chain,
1782                                const struct wsi_image_info *info,
1783                                struct wsi_image *image)
1784 {
1785    const struct wsi_device *wsi = chain->wsi;
1786    VkResult result;
1787 
1788    int cmd_buffer_count =
1789       chain->blit.queue != VK_NULL_HANDLE ? 1 : wsi->queue_family_count;
1790    image->blit.cmd_buffers =
1791       vk_zalloc(&chain->alloc,
1792                 sizeof(VkCommandBuffer) * cmd_buffer_count, 8,
1793                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1794    if (!image->blit.cmd_buffers)
1795       return VK_ERROR_OUT_OF_HOST_MEMORY;
1796 
1797    for (uint32_t i = 0; i < cmd_buffer_count; i++) {
1798       const VkCommandBufferAllocateInfo cmd_buffer_info = {
1799          .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
1800          .pNext = NULL,
1801          .commandPool = chain->cmd_pools[i],
1802          .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
1803          .commandBufferCount = 1,
1804       };
1805       result = wsi->AllocateCommandBuffers(chain->device, &cmd_buffer_info,
1806                                            &image->blit.cmd_buffers[i]);
1807       if (result != VK_SUCCESS)
1808          return result;
1809 
1810       const VkCommandBufferBeginInfo begin_info = {
1811          .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
1812       };
1813       wsi->BeginCommandBuffer(image->blit.cmd_buffers[i], &begin_info);
1814 
1815       VkImageMemoryBarrier img_mem_barriers[] = {
1816          {
1817             .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
1818             .pNext = NULL,
1819             .srcAccessMask = 0,
1820             .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
1821             .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
1822             .newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1823             .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
1824             .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
1825             .image = image->image,
1826             .subresourceRange = {
1827                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1828                .baseMipLevel = 0,
1829                .levelCount = 1,
1830                .baseArrayLayer = 0,
1831                .layerCount = 1,
1832             },
1833          },
1834          {
1835             .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
1836             .pNext = NULL,
1837             .srcAccessMask = 0,
1838             .dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
1839             .oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
1840             .newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1841             .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
1842             .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
1843             .image = image->blit.image,
1844             .subresourceRange = {
1845                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1846                .baseMipLevel = 0,
1847                .levelCount = 1,
1848                .baseArrayLayer = 0,
1849                .layerCount = 1,
1850             },
1851          },
1852       };
1853       uint32_t img_mem_barrier_count =
1854          chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT ? 1 : 2;
1855       wsi->CmdPipelineBarrier(image->blit.cmd_buffers[i],
1856                               VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
1857                               VK_PIPELINE_STAGE_TRANSFER_BIT,
1858                               0,
1859                               0, NULL,
1860                               0, NULL,
1861                               1, img_mem_barriers);
1862 
1863       if (chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT) {
1864          struct VkBufferImageCopy buffer_image_copy = {
1865             .bufferOffset = 0,
1866             .bufferRowLength = info->linear_stride /
1867                                vk_format_get_blocksize(info->create.format),
1868             .bufferImageHeight = 0,
1869             .imageSubresource = {
1870                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1871                .mipLevel = 0,
1872                .baseArrayLayer = 0,
1873                .layerCount = 1,
1874             },
1875             .imageOffset = { .x = 0, .y = 0, .z = 0 },
1876             .imageExtent = info->create.extent,
1877          };
1878          wsi->CmdCopyImageToBuffer(image->blit.cmd_buffers[i],
1879                                    image->image,
1880                                    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1881                                    image->blit.buffer,
1882                                    1, &buffer_image_copy);
1883       } else {
1884          struct VkImageCopy image_copy = {
1885             .srcSubresource = {
1886                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1887                .mipLevel = 0,
1888                .baseArrayLayer = 0,
1889                .layerCount = 1,
1890             },
1891             .srcOffset = { .x = 0, .y = 0, .z = 0 },
1892             .dstSubresource = {
1893                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
1894                .mipLevel = 0,
1895                .baseArrayLayer = 0,
1896                .layerCount = 1,
1897             },
1898             .dstOffset = { .x = 0, .y = 0, .z = 0 },
1899             .extent = info->create.extent,
1900          };
1901 
1902          wsi->CmdCopyImage(image->blit.cmd_buffers[i],
1903                            image->image,
1904                            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
1905                            image->blit.image,
1906                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1907                            1, &image_copy);
1908       }
1909 
1910       img_mem_barriers[0].srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
1911       img_mem_barriers[0].dstAccessMask = 0;
1912       img_mem_barriers[0].oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
1913       img_mem_barriers[0].newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
1914       img_mem_barriers[1].srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
1915       img_mem_barriers[1].dstAccessMask = 0;
1916       img_mem_barriers[1].oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
1917       img_mem_barriers[1].newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
1918       wsi->CmdPipelineBarrier(image->blit.cmd_buffers[i],
1919                               VK_PIPELINE_STAGE_TRANSFER_BIT,
1920                               VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
1921                               0,
1922                               0, NULL,
1923                               0, NULL,
1924                               img_mem_barrier_count, img_mem_barriers);
1925 
1926       result = wsi->EndCommandBuffer(image->blit.cmd_buffers[i]);
1927       if (result != VK_SUCCESS)
1928          return result;
1929    }
1930 
1931    return VK_SUCCESS;
1932 }
1933 
1934 void
wsi_configure_buffer_image(UNUSED const struct wsi_swapchain * chain,const VkSwapchainCreateInfoKHR * pCreateInfo,uint32_t stride_align,uint32_t size_align,struct wsi_image_info * info)1935 wsi_configure_buffer_image(UNUSED const struct wsi_swapchain *chain,
1936                            const VkSwapchainCreateInfoKHR *pCreateInfo,
1937                            uint32_t stride_align, uint32_t size_align,
1938                            struct wsi_image_info *info)
1939 {
1940    const struct wsi_device *wsi = chain->wsi;
1941 
1942    assert(util_is_power_of_two_nonzero(stride_align));
1943    assert(util_is_power_of_two_nonzero(size_align));
1944 
1945    info->create.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
1946    info->wsi.blit_src = true;
1947 
1948    const uint32_t cpp = vk_format_get_blocksize(pCreateInfo->imageFormat);
1949    info->linear_stride = pCreateInfo->imageExtent.width * cpp;
1950    info->linear_stride = ALIGN_POT(info->linear_stride, stride_align);
1951 
1952    /* Since we can pick the stride to be whatever we want, also align to the
1953     * device's optimalBufferCopyRowPitchAlignment so we get efficient copies.
1954     */
1955    assert(wsi->optimalBufferCopyRowPitchAlignment > 0);
1956    info->linear_stride = ALIGN_POT(info->linear_stride,
1957                                    wsi->optimalBufferCopyRowPitchAlignment);
1958 
1959    info->linear_size = info->linear_stride * pCreateInfo->imageExtent.height;
1960    info->linear_size = ALIGN_POT(info->linear_size, size_align);
1961 
1962    info->finish_create = wsi_finish_create_blit_context;
1963 }
1964 
1965 void
wsi_configure_image_blit_image(UNUSED const struct wsi_swapchain * chain,struct wsi_image_info * info)1966 wsi_configure_image_blit_image(UNUSED const struct wsi_swapchain *chain,
1967                                struct wsi_image_info *info)
1968 {
1969    info->create.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
1970    info->wsi.blit_src = true;
1971    info->finish_create = wsi_finish_create_blit_context;
1972 }
1973 
/* Allocate and map memory for a linear CPU-accessible swapchain image
 * (the no-blit software path).  The image memory is host-coherent and
 * persistently mapped into image->cpu_map; an optional caller-provided
 * shm allocator can supply the backing storage via host-pointer import.
 */
static VkResult
wsi_create_cpu_linear_image_mem(const struct wsi_swapchain *chain,
                                const struct wsi_image_info *info,
                                struct wsi_image *image)
{
   const struct wsi_device *wsi = chain->wsi;
   VkResult result;

   VkMemoryRequirements reqs;
   wsi->GetImageMemoryRequirements(chain->device, image->image, &reqs);

   /* Query the linear layout so we can report the row pitch below. */
   VkSubresourceLayout layout;
   wsi->GetImageSubresourceLayout(chain->device, image->image,
                                  &(VkImageSubresource) {
                                     .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
                                     .mipLevel = 0,
                                     .arrayLayer = 0,
                                  }, &layout);
   /* The plane offset reported to the window system is hard-coded to 0. */
   assert(layout.offset == 0);

   const VkMemoryDedicatedAllocateInfo memory_dedicated_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
      .image = image->image,
      .buffer = VK_NULL_HANDLE,
   };
   VkMemoryAllocateInfo memory_info = {
      .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
      .pNext = &memory_dedicated_info,
      .allocationSize = reqs.size,
      .memoryTypeIndex =
         wsi_select_host_memory_type(wsi, reqs.memoryTypeBits),
   };

   /* Software paths can hand us a shared-memory region to back the image. */
   void *sw_host_ptr = NULL;
   if (info->alloc_shm)
      sw_host_ptr = info->alloc_shm(image, layout.size);

   /* Lives on the stack but is chained into memory_info, so it must stay
    * in scope until AllocateMemory returns. */
   VkImportMemoryHostPointerInfoEXT host_ptr_info;
   if (sw_host_ptr != NULL) {
      host_ptr_info = (VkImportMemoryHostPointerInfoEXT) {
         .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT,
         .pHostPointer = sw_host_ptr,
         .handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT,
      };
      __vk_append_struct(&memory_info, &host_ptr_info);
   }

   result = wsi->AllocateMemory(chain->device, &memory_info,
                                &chain->alloc, &image->memory);
   if (result != VK_SUCCESS)
      return result;

   /* Keep the image permanently mapped for CPU presentation. */
   result = wsi->MapMemory(chain->device, image->memory,
                           0, VK_WHOLE_SIZE, 0, &image->cpu_map);
   if (result != VK_SUCCESS)
      return result;

   image->num_planes = 1;
   image->sizes[0] = reqs.size;
   image->row_pitches[0] = layout.rowPitch;
   image->offsets[0] = 0;

   return VK_SUCCESS;
}
2038 
2039 static VkResult
wsi_create_cpu_buffer_image_mem(const struct wsi_swapchain * chain,const struct wsi_image_info * info,struct wsi_image * image)2040 wsi_create_cpu_buffer_image_mem(const struct wsi_swapchain *chain,
2041                                 const struct wsi_image_info *info,
2042                                 struct wsi_image *image)
2043 {
2044    VkResult result;
2045 
2046    result = wsi_create_buffer_blit_context(chain, info, image, 0,
2047                                            false /* implicit_sync */);
2048    if (result != VK_SUCCESS)
2049       return result;
2050 
2051    result = chain->wsi->MapMemory(chain->device, image->blit.memory,
2052                                   0, VK_WHOLE_SIZE, 0, &image->cpu_map);
2053    if (result != VK_SUCCESS)
2054       return result;
2055 
2056    return VK_SUCCESS;
2057 }
2058 
2059 bool
wsi_cpu_image_needs_buffer_blit(const struct wsi_device * wsi,const struct wsi_cpu_image_params * params)2060 wsi_cpu_image_needs_buffer_blit(const struct wsi_device *wsi,
2061                                 const struct wsi_cpu_image_params *params)
2062 {
2063    if (WSI_DEBUG & WSI_DEBUG_BUFFER)
2064       return true;
2065 
2066    if (wsi->wants_linear)
2067       return false;
2068 
2069    return true;
2070 }
2071 
2072 VkResult
wsi_configure_cpu_image(const struct wsi_swapchain * chain,const VkSwapchainCreateInfoKHR * pCreateInfo,const struct wsi_cpu_image_params * params,struct wsi_image_info * info)2073 wsi_configure_cpu_image(const struct wsi_swapchain *chain,
2074                         const VkSwapchainCreateInfoKHR *pCreateInfo,
2075                         const struct wsi_cpu_image_params *params,
2076                         struct wsi_image_info *info)
2077 {
2078    assert(params->base.image_type == WSI_IMAGE_TYPE_CPU);
2079    assert(chain->blit.type == WSI_SWAPCHAIN_NO_BLIT ||
2080           chain->blit.type == WSI_SWAPCHAIN_BUFFER_BLIT);
2081 
2082    VkExternalMemoryHandleTypeFlags handle_types = 0;
2083    if (params->alloc_shm && chain->blit.type != WSI_SWAPCHAIN_NO_BLIT)
2084       handle_types = VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT;
2085 
2086    VkResult result = wsi_configure_image(chain, pCreateInfo,
2087                                          handle_types, info);
2088    if (result != VK_SUCCESS)
2089       return result;
2090 
2091    if (chain->blit.type != WSI_SWAPCHAIN_NO_BLIT) {
2092       wsi_configure_buffer_image(chain, pCreateInfo,
2093                                  1 /* stride_align */,
2094                                  1 /* size_align */,
2095                                  info);
2096 
2097       info->select_blit_dst_memory_type = wsi_select_host_memory_type;
2098       info->select_image_memory_type = wsi_select_device_memory_type;
2099       info->create_mem = wsi_create_cpu_buffer_image_mem;
2100    } else {
2101       /* Force the image to be linear */
2102       info->create.tiling = VK_IMAGE_TILING_LINEAR;
2103 
2104       info->create_mem = wsi_create_cpu_linear_image_mem;
2105    }
2106 
2107    info->alloc_shm = params->alloc_shm;
2108 
2109    return VK_SUCCESS;
2110 }
2111 
2112 VKAPI_ATTR VkResult VKAPI_CALL
wsi_WaitForPresentKHR(VkDevice device,VkSwapchainKHR _swapchain,uint64_t presentId,uint64_t timeout)2113 wsi_WaitForPresentKHR(VkDevice device, VkSwapchainKHR _swapchain,
2114                       uint64_t presentId, uint64_t timeout)
2115 {
2116    VK_FROM_HANDLE(wsi_swapchain, swapchain, _swapchain);
2117    assert(swapchain->wait_for_present);
2118    return swapchain->wait_for_present(swapchain, presentId, timeout);
2119 }
2120