/*
 * Copyright 2019 Google LLC
 * SPDX-License-Identifier: MIT
 *
 * based in part on anv and radv which are:
 * Copyright © 2015 Intel Corporation
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 */

#include "vn_wsi.h"

#include "vk_enum_to_str.h"
#include "wsi_common_entrypoints.h"

#include "vn_device.h"
#include "vn_image.h"
#include "vn_instance.h"
#include "vn_physical_device.h"
#include "vn_queue.h"

/* The common WSI support makes some assumptions about the driver.
 *
 * In wsi_device_init, it assumes VK_EXT_pci_bus_info is available.  In
 * wsi_create_native_image and wsi_create_prime_image, it assumes
 * VK_KHR_external_memory_fd and VK_EXT_external_memory_dma_buf are enabled.
 *
 * In wsi_create_native_image, if wsi_device::supports_modifiers is set and
 * the window system supports modifiers, it assumes
 * VK_EXT_image_drm_format_modifier is enabled.  Otherwise, it assumes that
 * wsi_image_create_info can be chained to VkImageCreateInfo and that
 * vkGetImageSubresourceLayout can be called even when the tiling is
 * VK_IMAGE_TILING_OPTIMAL.
 *
 * Together, this is how it shares dma-bufs, with explicit or implicit
 * modifiers, with the window system.
 *
 * For venus, we use explicit modifiers when the renderer and the window
 * system support them.  Otherwise, we have to fall back to
 * VK_IMAGE_TILING_LINEAR (or trigger the prime blit path).  The fallback
 * can be problematic when the memory is scanned out directly and special
 * requirements (e.g., alignments) must be met.
 *
 * The common WSI support makes further assumptions about the driver to
 * support implicit fencing.  In wsi_create_native_image and
 * wsi_create_prime_image, it assumes wsi_memory_allocate_info can be chained
 * to VkMemoryAllocateInfo.  In wsi_common_queue_present, it assumes
 * wsi_memory_signal_submit_info can be chained to VkSubmitInfo.  Finally, in
 * wsi_common_acquire_next_image2, it calls
 * wsi_device::signal_semaphore_for_memory and
 * wsi_device::signal_fence_for_memory if the driver provides them.
 *
 * Some drivers use wsi_memory_allocate_info to set up implicit fencing.
 * Others use wsi_memory_signal_submit_info to set up implicit in-fences and
 * wsi_device::signal_*_for_memory to set up implicit out-fences.
 *
 * For venus, implicit fencing is broken (and there is no explicit fencing
 * support yet).  The kernel driver assumes everything shares the same fence
 * context and that no synchronization is needed.  That should be fixed for
 * correctness, but even then it would not be ideal: venus requires explicit
 * fencing (and renderer-side synchronization) to work well.
 */

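/* For illustration only, a minimal sketch of the pNext chaining described
 * above, assuming the wsi_image_create_info layout from wsi_common.h (the
 * sType constant name is indicative, not authoritative):
 *
 *    const struct wsi_image_create_info wsi_info = {
 *       .sType = VK_STRUCTURE_TYPE_WSI_IMAGE_CREATE_INFO_MESA,
 *       .pNext = NULL,
 *       .scanout = true,
 *    };
 *    const VkImageCreateInfo info = {
 *       .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
 *       .pNext = &wsi_info,
 *       .tiling = VK_IMAGE_TILING_OPTIMAL,
 *       ...
 *    };
 *
 * vn_wsi_create_image below receives such a wsi_image_create_info from the
 * common code and inspects its scanout and blit_src fields.
 */
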
/* cast a WSI object to a pointer for logging */
#define VN_WSI_PTR(obj) ((const void *)(uintptr_t)(obj))

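/* Proc-addr callback handed to the common WSI layer so it can resolve the
 * driver's own entrypoints through the instance dispatch table.
 */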
static PFN_vkVoidFunction
vn_wsi_proc_addr(VkPhysicalDevice physicalDevice, const char *pName)
{
   struct vn_physical_device *physical_dev =
      vn_physical_device_from_handle(physicalDevice);
   return vk_instance_get_proc_addr_unchecked(
      &physical_dev->instance->base.base, pName);
}

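/* Initializes the common WSI device state for this physical device and
 * advertises DRM format modifier support, so the common code prefers the
 * explicit-modifier path described above.
 */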
VkResult
vn_wsi_init(struct vn_physical_device *physical_dev)
{
   const VkAllocationCallbacks *alloc =
      &physical_dev->instance->base.base.alloc;
   VkResult result = wsi_device_init(
      &physical_dev->wsi_device, vn_physical_device_to_handle(physical_dev),
      vn_wsi_proc_addr, alloc, -1, &physical_dev->instance->dri_options,
      &(struct wsi_device_options){
         .sw_device = false,
         .extra_xwayland_image = true,
      });
   if (result != VK_SUCCESS)
      return result;

   physical_dev->wsi_device.supports_modifiers = true;
   physical_dev->base.base.wsi_device = &physical_dev->wsi_device;

   return VK_SUCCESS;
}

void
vn_wsi_fini(struct vn_physical_device *physical_dev)
{
   const VkAllocationCallbacks *alloc =
      &physical_dev->instance->base.base.alloc;
   physical_dev->base.base.wsi_device = NULL;
   wsi_device_finish(&physical_dev->wsi_device, alloc);
}

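/* Creates a vn_image for the common WSI code.  A scanout image created
 * without modifier support is forced to DRM_FORMAT_MOD_LINEAR through an
 * explicit modifier list; otherwise the given create info is used largely
 * as-is.  The WSI metadata (tiling override, prime blit usage, and the
 * modifier reported by the renderer) is recorded on the image so that
 * vn_wsi_create_image_from_swapchain can later create matching images.
 */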
VkResult
vn_wsi_create_image(struct vn_device *dev,
                    const VkImageCreateInfo *create_info,
                    const struct wsi_image_create_info *wsi_info,
                    const VkAllocationCallbacks *alloc,
                    struct vn_image **out_img)
{
   /* TODO This is the legacy path used by wsi_create_native_image when there
    * is no modifier support.  Instead of forcing linear tiling, we should
    * ask wsi to use wsi_create_prime_image.
    *
    * In fact, this is not enough when the image is truly used for scanout by
    * the host compositor.  There can be requirements we fail to meet.  We
    * should require modifier support at some point.
    */
   const uint64_t modifier = DRM_FORMAT_MOD_LINEAR;
   const VkImageDrmFormatModifierListCreateInfoEXT mod_list_info = {
      .sType =
         VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT,
      .pNext = create_info->pNext,
      .drmFormatModifierCount = 1,
      .pDrmFormatModifiers = &modifier,
   };
   VkImageCreateInfo local_create_info = *create_info;
   create_info = &local_create_info;
   if (wsi_info->scanout) {
      assert(!vk_find_struct_const(
         create_info->pNext, IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT));

      local_create_info.pNext = &mod_list_info;
      local_create_info.tiling = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;

      if (VN_DEBUG(WSI)) {
         vn_log(
            dev->instance,
            "forcing scanout image linear (no explicit modifier support)");
      }
   } else {
      if (dev->physical_device->renderer_driver_id ==
          VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA) {
         /* See explanation in vn_GetPhysicalDeviceImageFormatProperties2() */
         local_create_info.flags &= ~VK_IMAGE_CREATE_ALIAS_BIT;
      }

      if (VN_PERF(NO_TILED_WSI_IMAGE)) {
         const VkImageDrmFormatModifierListCreateInfoEXT *modifier_info =
            vk_find_struct_const(
               create_info->pNext,
               IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT);
         assert(modifier_info);
         assert(modifier_info->drmFormatModifierCount == 1 &&
                modifier_info->pDrmFormatModifiers[0] ==
                   DRM_FORMAT_MOD_LINEAR);
         if (VN_DEBUG(WSI)) {
            vn_log(dev->instance,
                   "forcing scanout image linear (given no_tiled_wsi_image)");
         }
      }
   }

   struct vn_image *img;
   VkResult result = vn_image_create(dev, create_info, alloc, &img);
   if (result != VK_SUCCESS)
      return result;

   img->wsi.is_wsi = true;
   img->wsi.is_prime_blit_src = wsi_info->blit_src;
   img->wsi.tiling_override = create_info->tiling;

   if (create_info->tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
      VkDevice dev_handle = vn_device_to_handle(dev);
      VkImage img_handle = vn_image_to_handle(img);

      VkImageDrmFormatModifierPropertiesEXT props = {
         .sType = VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT,
      };
      result = vn_GetImageDrmFormatModifierPropertiesEXT(dev_handle,
                                                         img_handle, &props);
      if (result != VK_SUCCESS) {
         vn_DestroyImage(dev_handle, img_handle, alloc);
         return result;
      }

      img->wsi.drm_format_modifier = props.drmFormatModifier;
   }

   *out_img = img;
   return VK_SUCCESS;
}

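/* Creates an image that can be bound to swapchain memory.  The external
 * memory handle type, tiling override, DRM format modifier, and (for the
 * prime blit path) TRANSFER_SRC usage are copied from swapchain image 0 so
 * the new image matches what the common WSI and vn_wsi_create_image
 * produced.
 */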
VkResult
vn_wsi_create_image_from_swapchain(
   struct vn_device *dev,
   const VkImageCreateInfo *create_info,
   const VkImageSwapchainCreateInfoKHR *swapchain_info,
   const VkAllocationCallbacks *alloc,
   struct vn_image **out_img)
{
   const struct vn_image *swapchain_img = vn_image_from_handle(
      wsi_common_get_image(swapchain_info->swapchain, 0));
   assert(swapchain_img->wsi.is_wsi);

   /* must match what the common WSI and vn_wsi_create_image do */
   VkImageCreateInfo local_create_info = *create_info;

   /* match external memory */
   const VkExternalMemoryImageCreateInfo local_external_info = {
      .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO,
      .pNext = local_create_info.pNext,
      .handleTypes =
         dev->physical_device->external_memory.renderer_handle_type,
   };
   local_create_info.pNext = &local_external_info;

   /* match image tiling */
   local_create_info.tiling = swapchain_img->wsi.tiling_override;

   VkImageDrmFormatModifierListCreateInfoEXT local_mod_info;
   if (local_create_info.tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
      local_mod_info = (const VkImageDrmFormatModifierListCreateInfoEXT){
         .sType =
            VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT,
         .pNext = local_create_info.pNext,
         .drmFormatModifierCount = 1,
         .pDrmFormatModifiers = &swapchain_img->wsi.drm_format_modifier,
      };
      local_create_info.pNext = &local_mod_info;
   }

   /* match image usage */
   if (swapchain_img->wsi.is_prime_blit_src)
      local_create_info.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;

   create_info = &local_create_info;

   struct vn_image *img;
   VkResult result = vn_image_create(dev, create_info, alloc, &img);
   if (result != VK_SUCCESS)
      return result;

   img->wsi.is_wsi = true;
   img->wsi.tiling_override = swapchain_img->wsi.tiling_override;
   img->wsi.drm_format_modifier = swapchain_img->wsi.drm_format_modifier;

   *out_img = img;
   return VK_SUCCESS;
}

/* swapchain commands */

VkResult
vn_CreateSwapchainKHR(VkDevice device,
                      const VkSwapchainCreateInfoKHR *pCreateInfo,
                      const VkAllocationCallbacks *pAllocator,
                      VkSwapchainKHR *pSwapchain)
{
   struct vn_device *dev = vn_device_from_handle(device);

   VkResult result =
      wsi_CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
   if (VN_DEBUG(WSI) && result == VK_SUCCESS) {
      vn_log(dev->instance,
             "swapchain %p: created with surface %p, min count %d, size "
             "%dx%d, mode %s, old %p",
             VN_WSI_PTR(*pSwapchain), VN_WSI_PTR(pCreateInfo->surface),
             pCreateInfo->minImageCount, pCreateInfo->imageExtent.width,
             pCreateInfo->imageExtent.height,
             vk_PresentModeKHR_to_str(pCreateInfo->presentMode),
             VN_WSI_PTR(pCreateInfo->oldSwapchain));
   }

   vn_tls_set_async_pipeline_create();

   return vn_result(dev->instance, result);
}

void
vn_DestroySwapchainKHR(VkDevice device,
                       VkSwapchainKHR swapchain,
                       const VkAllocationCallbacks *pAllocator)
{
   struct vn_device *dev = vn_device_from_handle(device);

   wsi_DestroySwapchainKHR(device, swapchain, pAllocator);
   if (VN_DEBUG(WSI))
      vn_log(dev->instance, "swapchain %p: destroyed", VN_WSI_PTR(swapchain));
}

VkResult
vn_QueuePresentKHR(VkQueue _queue, const VkPresentInfoKHR *pPresentInfo)
{
   VN_TRACE_FUNC();
   struct vk_queue *queue_vk = vk_queue_from_handle(_queue);
   struct vn_device *dev = (void *)queue_vk->base.device;

   VkResult result = wsi_common_queue_present(
      &dev->physical_device->wsi_device, vn_device_to_handle(dev), _queue,
      queue_vk->queue_family_index, pPresentInfo);
   if (VN_DEBUG(WSI) && result != VK_SUCCESS) {
      for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
         const VkResult r =
            pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
         vn_log(dev->instance, "swapchain %p: presented image %d: %s",
                VN_WSI_PTR(pPresentInfo->pSwapchains[i]),
                pPresentInfo->pImageIndices[i], vk_Result_to_str(r));
      }
   }

   return vn_result(dev->instance, result);
}

VkResult
vn_AcquireNextImage2KHR(VkDevice device,
                        const VkAcquireNextImageInfoKHR *pAcquireInfo,
                        uint32_t *pImageIndex)
{
   VN_TRACE_FUNC();
   struct vn_device *dev = vn_device_from_handle(device);

   VkResult result = wsi_common_acquire_next_image2(
      &dev->physical_device->wsi_device, device, pAcquireInfo, pImageIndex);
   if (VN_DEBUG(WSI) && result != VK_SUCCESS) {
      const int idx = result >= VK_SUCCESS ? *pImageIndex : -1;
      vn_log(dev->instance, "swapchain %p: acquired image %d: %s",
             VN_WSI_PTR(pAcquireInfo->swapchain), idx,
             vk_Result_to_str(result));
   }

   /* XXX this relies on implicit sync */
   if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
      struct vn_semaphore *sem =
         vn_semaphore_from_handle(pAcquireInfo->semaphore);
      if (sem)
         vn_semaphore_signal_wsi(dev, sem);

      struct vn_fence *fence = vn_fence_from_handle(pAcquireInfo->fence);
      if (fence)
         vn_fence_signal_wsi(dev, fence);
   }

   return vn_result(dev->instance, result);
}