/*
 * Copyright 2024 Valve Corporation
 * Copyright 2024 Alyssa Rosenzweig
 * Copyright 2022-2023 Collabora Ltd. and Red Hat Inc.
 * SPDX-License-Identifier: MIT
 */
#include "hk_buffer.h"

#include "agx_bo.h"
#include "agx_device.h"
#include "hk_device.h"
#include "hk_device_memory.h"
#include "hk_entrypoints.h"
#include "hk_physical_device.h"

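/*
 * Calculate the required alignment for a buffer from its usage and create
 * flags. Sparse and capture/replay buffers are aligned to 16 KiB, which is
 * assumed here to be the granularity at which their VA ranges can be bound.
 */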
static uint32_t
hk_get_buffer_alignment(const struct hk_physical_device *pdev,
                        VkBufferUsageFlags2KHR usage_flags,
                        VkBufferCreateFlags create_flags)
{
   uint32_t alignment = 16;

   if (usage_flags & VK_BUFFER_USAGE_2_UNIFORM_BUFFER_BIT_KHR)
      alignment = MAX2(alignment, HK_MIN_UBO_ALIGNMENT);

   if (usage_flags & VK_BUFFER_USAGE_2_STORAGE_BUFFER_BIT_KHR)
      alignment = MAX2(alignment, HK_MIN_SSBO_ALIGNMENT);

   if (usage_flags & (VK_BUFFER_USAGE_2_UNIFORM_TEXEL_BUFFER_BIT_KHR |
                      VK_BUFFER_USAGE_2_STORAGE_TEXEL_BUFFER_BIT_KHR))
      alignment = MAX2(alignment, HK_MIN_TEXEL_BUFFER_ALIGNMENT);

   if (create_flags & (VK_BUFFER_CREATE_SPARSE_BINDING_BIT |
                       VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT))
      alignment = MAX2(alignment, 16384);

   return alignment;
}

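/*
 * Look up the opaque capture address requested through the pNext chain, if
 * any. Both VkBufferOpaqueCaptureAddressCreateInfo and the older
 * VkBufferDeviceAddressCreateInfoEXT are honoured. In release builds the
 * first nonzero address wins; in debug builds we assert that all supplied
 * addresses agree.
 */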
static uint64_t
hk_get_bda_replay_addr(const VkBufferCreateInfo *pCreateInfo)
{
   uint64_t addr = 0;
   vk_foreach_struct_const(ext, pCreateInfo->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO: {
         const VkBufferOpaqueCaptureAddressCreateInfo *bda = (void *)ext;
         if (bda->opaqueCaptureAddress != 0) {
#ifdef NDEBUG
            return bda->opaqueCaptureAddress;
#else
            assert(addr == 0 || bda->opaqueCaptureAddress == addr);
            addr = bda->opaqueCaptureAddress;
#endif
         }
         break;
      }

      case VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT: {
         const VkBufferDeviceAddressCreateInfoEXT *bda = (void *)ext;
         if (bda->deviceAddress != 0) {
#ifdef NDEBUG
            return bda->deviceAddress;
#else
            assert(addr == 0 || bda->deviceAddress == addr);
            addr = bda->deviceAddress;
#endif
         }
         break;
      }

      default:
         break;
      }
   }

   return addr;
}

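/*
 * For sparse or capture/replay buffers, reserve a VA range up front at
 * buffer creation so the device address is stable before (and across)
 * memory binds. All other buffers get their address at bind time instead.
 */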
VKAPI_ATTR VkResult VKAPI_CALL
hk_CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer)
{
   VK_FROM_HANDLE(hk_device, dev, device);
   struct hk_buffer *buffer;

   if (pCreateInfo->size > HK_MAX_BUFFER_SIZE)
      return vk_error(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   buffer =
      vk_buffer_create(&dev->vk, pCreateInfo, pAllocator, sizeof(*buffer));
   if (!buffer)
      return vk_error(dev, VK_ERROR_OUT_OF_HOST_MEMORY);

   if (buffer->vk.size > 0 &&
       (buffer->vk.create_flags &
        (VK_BUFFER_CREATE_SPARSE_BINDING_BIT |
         VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT))) {

      const uint32_t alignment = hk_get_buffer_alignment(
         hk_device_physical(dev), buffer->vk.usage, buffer->vk.create_flags);
      assert(alignment >= 16384);
      uint64_t vma_size_B = align64(buffer->vk.size, alignment);

      const bool bda_capture_replay =
         buffer->vk.create_flags &
         VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT;

      enum agx_va_flags flags = 0;
      uint64_t bda_fixed_addr = 0;
      if (bda_capture_replay) {
         bda_fixed_addr = hk_get_bda_replay_addr(pCreateInfo);
         if (bda_fixed_addr != 0)
            flags |= AGX_VA_FIXED;
      }

      buffer->va =
         agx_va_alloc(&dev->dev, vma_size_B, alignment, flags, bda_fixed_addr);

      if (!buffer->va) {
         vk_buffer_destroy(&dev->vk, pAllocator, &buffer->vk);
         return vk_errorf(dev, VK_ERROR_OUT_OF_DEVICE_MEMORY,
                          "Sparse VMA allocation failed");
      }
      buffer->addr = buffer->va->addr;
   }

   *pBuffer = hk_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

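/*
 * Destroy a buffer, releasing the dedicated VA range if one was reserved at
 * creation. The backing memory is owned by the VkDeviceMemory object, not
 * the buffer, so it is not freed here.
 */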
VKAPI_ATTR void VKAPI_CALL
hk_DestroyBuffer(VkDevice device, VkBuffer _buffer,
                 const VkAllocationCallbacks *pAllocator)
{
   VK_FROM_HANDLE(hk_device, dev, device);
   VK_FROM_HANDLE(hk_buffer, buffer, _buffer);

   if (!buffer)
      return;

   if (buffer->va) {
      // TODO
      // agx_bo_unbind_vma(dev->ws_dev, buffer->addr, buffer->vma_size_B);
      agx_va_free(&dev->dev, buffer->va);
   }

   vk_buffer_destroy(&dev->vk, pAllocator, &buffer->vk);
}

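/*
 * Report memory requirements without needing a VkBuffer instance. Any
 * memory type works for any buffer, so memoryTypeBits is simply a mask of
 * every supported type.
 */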
VKAPI_ATTR void VKAPI_CALL
hk_GetDeviceBufferMemoryRequirements(
   VkDevice device, const VkDeviceBufferMemoryRequirements *pInfo,
   VkMemoryRequirements2 *pMemoryRequirements)
{
   VK_FROM_HANDLE(hk_device, dev, device);
   struct hk_physical_device *pdev = hk_device_physical(dev);

   const uint32_t alignment = hk_get_buffer_alignment(
      hk_device_physical(dev), pInfo->pCreateInfo->usage,
      pInfo->pCreateInfo->flags);

   pMemoryRequirements->memoryRequirements = (VkMemoryRequirements){
      .size = align64(pInfo->pCreateInfo->size, alignment),
      .alignment = alignment,
      .memoryTypeBits = BITFIELD_MASK(pdev->mem_type_count),
   };

   vk_foreach_struct_const(ext, pMemoryRequirements->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
         VkMemoryDedicatedRequirements *dedicated = (void *)ext;
         dedicated->prefersDedicatedAllocation = false;
         dedicated->requiresDedicatedAllocation = false;
         break;
      }
      default:
         vk_debug_ignored_stype(ext->sType);
         break;
      }
   }
}

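/*
 * Advertise which external memory handle types a buffer can be imported
 * from or exported to. Only opaque FDs and dma-bufs are supported, and both
 * share the same dma-buf memory properties.
 */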
VKAPI_ATTR void VKAPI_CALL
hk_GetPhysicalDeviceExternalBufferProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalBufferInfo *pExternalBufferInfo,
   VkExternalBufferProperties *pExternalBufferProperties)
{
   /* The Vulkan 1.3.256 spec says:
    *
    *    VUID-VkPhysicalDeviceExternalBufferInfo-handleType-parameter
    *
    *    "handleType must be a valid VkExternalMemoryHandleTypeFlagBits value"
    *
    * This differs from VkPhysicalDeviceExternalImageFormatInfo, which
    * surprisingly permits handleType == 0.
    */
   assert(pExternalBufferInfo->handleType != 0);

   /* All of the current flags are for sparse, which we don't support yet.
    * Even when we do support it, doing sparse on external memory sounds
    * sketchy. Also, just disallowing flags is the safe option.
    */
   if (pExternalBufferInfo->flags)
      goto unsupported;

   switch (pExternalBufferInfo->handleType) {
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
   case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
      pExternalBufferProperties->externalMemoryProperties =
         hk_dma_buf_mem_props;
      return;
   default:
      goto unsupported;
   }

unsupported:
   /* From the Vulkan 1.3.256 spec:
    *
    *    compatibleHandleTypes must include at least handleType.
    */
   pExternalBufferProperties->externalMemoryProperties =
      (VkExternalMemoryProperties){
         .compatibleHandleTypes = pExternalBufferInfo->handleType,
      };
}

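/*
 * Bind buffers to device memory. Buffers that reserved a VA range at
 * creation bind the BO into that range with read/write access; everything
 * else just records the BO's GPU address plus the bind offset.
 */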
VKAPI_ATTR VkResult VKAPI_CALL
hk_BindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
                     const VkBindBufferMemoryInfo *pBindInfos)
{
   for (uint32_t i = 0; i < bindInfoCount; ++i) {
      VK_FROM_HANDLE(hk_device_memory, mem, pBindInfos[i].memory);
      VK_FROM_HANDLE(hk_buffer, buffer, pBindInfos[i].buffer);

      if (buffer->va) {
         VK_FROM_HANDLE(hk_device, dev, device);
         dev->dev.ops.bo_bind(&dev->dev, mem->bo, buffer->addr,
                              buffer->va->size_B, pBindInfos[i].memoryOffset,
                              ASAHI_BIND_READ | ASAHI_BIND_WRITE, false);
      } else {
         buffer->addr = mem->bo->va->addr + pBindInfos[i].memoryOffset;
      }

      const VkBindMemoryStatusKHR *status =
         vk_find_struct_const(pBindInfos[i].pNext, BIND_MEMORY_STATUS_KHR);
      if (status != NULL && status->pResult != NULL)
         *status->pResult = VK_SUCCESS;
   }
   return VK_SUCCESS;
}

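/*
 * Return the buffer's GPU virtual address for use with
 * VK_KHR_buffer_device_address.
 */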
VKAPI_ATTR VkDeviceAddress VKAPI_CALL
hk_GetBufferDeviceAddress(UNUSED VkDevice device,
                          const VkBufferDeviceAddressInfo *pInfo)
{
   VK_FROM_HANDLE(hk_buffer, buffer, pInfo->buffer);

   return hk_buffer_address(buffer, 0);
}

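/*
 * The opaque capture address is identical to the buffer device address:
 * capture/replay buffers reserve their VA at creation, so a replay can
 * request the same address again via AGX_VA_FIXED.
 */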
VKAPI_ATTR uint64_t VKAPI_CALL
hk_GetBufferOpaqueCaptureAddress(UNUSED VkDevice device,
                                 const VkBufferDeviceAddressInfo *pInfo)
{
   VK_FROM_HANDLE(hk_buffer, buffer, pInfo->buffer);

   return hk_buffer_address(buffer, 0);
}