1 /*
2 * Copyright © 2022 Collabora Ltd. and Red Hat Inc.
3 * SPDX-License-Identifier: MIT
4 */
5 #include "nvk_descriptor_table.h"
6
7 #include "nvk_device.h"
8 #include "nvk_physical_device.h"
9
10 #include <sys/mman.h>
11
/* Grow the descriptor table to hold new_alloc descriptors.
 *
 * Must be called with table->mutex held (or before the table is visible to
 * other threads, as during init).  On success, table->mem, table->in_use,
 * table->free_table, and table->alloc all describe the larger table.  On
 * failure, the table remains usable: each piece of state is only replaced
 * after its replacement has been successfully allocated, and table->alloc
 * is only bumped once everything has succeeded.
 */
static VkResult
nvk_descriptor_table_grow_locked(struct nvk_device *dev,
                                 struct nvk_descriptor_table *table,
                                 uint32_t new_alloc)
{
   struct nvkmd_mem *new_mem;
   BITSET_WORD *new_in_use;
   uint32_t *new_free_table;
   VkResult result;

   assert(new_alloc > table->alloc && new_alloc <= table->max_alloc);

   /* Allocate a new, larger GPU buffer and copy the old descriptors over. */
   const uint32_t new_mem_size = new_alloc * table->desc_size;
   result = nvkmd_dev_alloc_mapped_mem(dev->nvkmd, &dev->vk.base,
                                       new_mem_size, 256,
                                       NVKMD_MEM_LOCAL, NVKMD_MEM_MAP_WR,
                                       &new_mem);
   if (result != VK_SUCCESS)
      return result;

   if (table->mem) {
      assert(new_mem_size >= table->mem->size_B);
      memcpy(new_mem->map, table->mem->map, table->mem->size_B);
      nvkmd_mem_unref(table->mem);
   }
   table->mem = new_mem;

   /* Grow the in-use bitset.  Both the old and new counts are whole
    * multiples of BITSET_WORDBITS, so realloc plus a memset of the new
    * tail words is sufficient.
    */
   assert((table->alloc % BITSET_WORDBITS) == 0);
   assert((new_alloc % BITSET_WORDBITS) == 0);
   const size_t old_in_use_size =
      BITSET_WORDS(table->alloc) * sizeof(BITSET_WORD);
   const size_t new_in_use_size =
      BITSET_WORDS(new_alloc) * sizeof(BITSET_WORD);
   new_in_use = vk_realloc(&dev->vk.alloc, table->in_use,
                           new_in_use_size, sizeof(BITSET_WORD),
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_in_use == NULL) {
      /* On realloc failure the old table->in_use is still valid and
       * table->alloc is unchanged, so we can just bail.
       */
      return vk_errorf(dev, VK_ERROR_OUT_OF_HOST_MEMORY,
                       "Failed to allocate image descriptor in-use set");
   }
   memset((char *)new_in_use + old_in_use_size, 0,
          new_in_use_size - old_in_use_size);
   table->in_use = new_in_use;

   /* Grow the free table.  Its tail needs no initialization because only
    * the first free_count entries are ever read.
    */
   const size_t new_free_table_size = new_alloc * sizeof(uint32_t);
   new_free_table = vk_realloc(&dev->vk.alloc, table->free_table,
                               new_free_table_size, 4,
                               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_free_table == NULL) {
      return vk_errorf(dev, VK_ERROR_OUT_OF_HOST_MEMORY,
                       "Failed to allocate image descriptor free table");
   }
   table->free_table = new_free_table;

   /* Commit the new size only after every allocation has succeeded. */
   table->alloc = new_alloc;

   return VK_SUCCESS;
}
70
71 VkResult
nvk_descriptor_table_init(struct nvk_device * dev,struct nvk_descriptor_table * table,uint32_t descriptor_size,uint32_t min_descriptor_count,uint32_t max_descriptor_count)72 nvk_descriptor_table_init(struct nvk_device *dev,
73 struct nvk_descriptor_table *table,
74 uint32_t descriptor_size,
75 uint32_t min_descriptor_count,
76 uint32_t max_descriptor_count)
77 {
78 memset(table, 0, sizeof(*table));
79 VkResult result;
80
81 simple_mtx_init(&table->mutex, mtx_plain);
82
83 assert(util_is_power_of_two_nonzero(min_descriptor_count));
84 assert(util_is_power_of_two_nonzero(max_descriptor_count));
85
86 table->desc_size = descriptor_size;
87 table->alloc = 0;
88 table->max_alloc = max_descriptor_count;
89 table->next_desc = 0;
90 table->free_count = 0;
91
92 result = nvk_descriptor_table_grow_locked(dev, table, min_descriptor_count);
93 if (result != VK_SUCCESS) {
94 nvk_descriptor_table_finish(dev, table);
95 return result;
96 }
97
98 return VK_SUCCESS;
99 }
100
101 void
nvk_descriptor_table_finish(struct nvk_device * dev,struct nvk_descriptor_table * table)102 nvk_descriptor_table_finish(struct nvk_device *dev,
103 struct nvk_descriptor_table *table)
104 {
105 if (table->mem != NULL)
106 nvkmd_mem_unref(table->mem);
107 vk_free(&dev->vk.alloc, table->in_use);
108 vk_free(&dev->vk.alloc, table->free_table);
109 simple_mtx_destroy(&table->mutex);
110 }
111
/* NOTE(review): this macro expands to nothing and is not referenced
 * anywhere in this file.  It looks like its value (an invalid-descriptor
 * sentinel?) may have been lost — confirm against the header and users.
 */
#define NVK_IMAGE_DESC_INVALID
113
114 static VkResult
nvk_descriptor_table_alloc_locked(struct nvk_device * dev,struct nvk_descriptor_table * table,uint32_t * index_out)115 nvk_descriptor_table_alloc_locked(struct nvk_device *dev,
116 struct nvk_descriptor_table *table,
117 uint32_t *index_out)
118 {
119 VkResult result;
120
121 while (1) {
122 uint32_t index;
123 if (table->free_count > 0) {
124 index = table->free_table[--table->free_count];
125 } else if (table->next_desc < table->alloc) {
126 index = table->next_desc++;
127 } else {
128 if (table->next_desc >= table->max_alloc) {
129 return vk_errorf(dev, VK_ERROR_OUT_OF_HOST_MEMORY,
130 "Descriptor table not large enough");
131 }
132
133 result = nvk_descriptor_table_grow_locked(dev, table,
134 table->alloc * 2);
135 if (result != VK_SUCCESS)
136 return result;
137
138 assert(table->next_desc < table->alloc);
139 index = table->next_desc++;
140 }
141
142 if (!BITSET_TEST(table->in_use, index)) {
143 BITSET_SET(table->in_use, index);
144 *index_out = index;
145 return VK_SUCCESS;
146 }
147 }
148 }
149
150 static VkResult
nvk_descriptor_table_take_locked(struct nvk_device * dev,struct nvk_descriptor_table * table,uint32_t index)151 nvk_descriptor_table_take_locked(struct nvk_device *dev,
152 struct nvk_descriptor_table *table,
153 uint32_t index)
154 {
155 VkResult result;
156
157 while (index >= table->alloc) {
158 result = nvk_descriptor_table_grow_locked(dev, table, table->alloc * 2);
159 if (result != VK_SUCCESS)
160 return result;
161 }
162
163 if (BITSET_TEST(table->in_use, index)) {
164 return vk_errorf(dev, VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS,
165 "Descriptor %u is already in use", index);
166 } else {
167 BITSET_SET(table->in_use, index);
168 return VK_SUCCESS;
169 }
170 }
171
172 static VkResult
nvk_descriptor_table_add_locked(struct nvk_device * dev,struct nvk_descriptor_table * table,const void * desc_data,size_t desc_size,uint32_t * index_out)173 nvk_descriptor_table_add_locked(struct nvk_device *dev,
174 struct nvk_descriptor_table *table,
175 const void *desc_data, size_t desc_size,
176 uint32_t *index_out)
177 {
178 VkResult result = nvk_descriptor_table_alloc_locked(dev, table, index_out);
179 if (result != VK_SUCCESS)
180 return result;
181
182 void *map = (char *)table->mem->map + (*index_out * table->desc_size);
183
184 assert(desc_size == table->desc_size);
185 memcpy(map, desc_data, table->desc_size);
186
187 return VK_SUCCESS;
188 }
189
190
191 VkResult
nvk_descriptor_table_add(struct nvk_device * dev,struct nvk_descriptor_table * table,const void * desc_data,size_t desc_size,uint32_t * index_out)192 nvk_descriptor_table_add(struct nvk_device *dev,
193 struct nvk_descriptor_table *table,
194 const void *desc_data, size_t desc_size,
195 uint32_t *index_out)
196 {
197 simple_mtx_lock(&table->mutex);
198 VkResult result = nvk_descriptor_table_add_locked(dev, table, desc_data,
199 desc_size, index_out);
200 simple_mtx_unlock(&table->mutex);
201
202 return result;
203 }
204
205 static VkResult
nvk_descriptor_table_insert_locked(struct nvk_device * dev,struct nvk_descriptor_table * table,uint32_t index,const void * desc_data,size_t desc_size)206 nvk_descriptor_table_insert_locked(struct nvk_device *dev,
207 struct nvk_descriptor_table *table,
208 uint32_t index,
209 const void *desc_data, size_t desc_size)
210 {
211 VkResult result = nvk_descriptor_table_take_locked(dev, table, index);
212 if (result != VK_SUCCESS)
213 return result;
214
215 void *map = (char *)table->mem->map + (index * table->desc_size);
216
217 assert(desc_size == table->desc_size);
218 memcpy(map, desc_data, table->desc_size);
219
220 return result;
221 }
222
223 VkResult
nvk_descriptor_table_insert(struct nvk_device * dev,struct nvk_descriptor_table * table,uint32_t index,const void * desc_data,size_t desc_size)224 nvk_descriptor_table_insert(struct nvk_device *dev,
225 struct nvk_descriptor_table *table,
226 uint32_t index,
227 const void *desc_data, size_t desc_size)
228 {
229 simple_mtx_lock(&table->mutex);
230 VkResult result = nvk_descriptor_table_insert_locked(dev, table, index,
231 desc_data, desc_size);
232 simple_mtx_unlock(&table->mutex);
233
234 return result;
235 }
236
/* qsort() comparator for uint32_t values, ascending.
 *
 * Note: the tempting "return *a - *b" is wrong here.  The subtraction is
 * performed in unsigned arithmetic and then converted to int, so a
 * difference larger than INT_MAX comes out with the wrong sign (e.g.
 * comparing 0 with 0x80000000 would claim 0 is the larger).  Compare
 * explicitly instead.
 */
static int
compar_u32(const void *_a, const void *_b)
{
   const uint32_t *a = _a, *b = _b;
   return (*a > *b) - (*a < *b);
}
243
244 static void
nvk_descriptor_table_compact_free_table(struct nvk_descriptor_table * table)245 nvk_descriptor_table_compact_free_table(struct nvk_descriptor_table *table)
246 {
247 if (table->free_count <= 1)
248 return;
249
250 qsort(table->free_table, table->free_count,
251 sizeof(*table->free_table), compar_u32);
252
253 uint32_t j = 1;
254 for (uint32_t i = 1; i < table->free_count; i++) {
255 if (table->free_table[i] == table->free_table[j - 1])
256 continue;
257
258 assert(table->free_table[i] > table->free_table[j - 1]);
259 table->free_table[j++] = table->free_table[i];
260 }
261
262 table->free_count = j;
263 }
264
265 void
nvk_descriptor_table_remove(struct nvk_device * dev,struct nvk_descriptor_table * table,uint32_t index)266 nvk_descriptor_table_remove(struct nvk_device *dev,
267 struct nvk_descriptor_table *table,
268 uint32_t index)
269 {
270 simple_mtx_lock(&table->mutex);
271
272 void *map = (char *)table->mem->map + (index * table->desc_size);
273 memset(map, 0, table->desc_size);
274
275 assert(BITSET_TEST(table->in_use, index));
276
277 /* There may be duplicate entries in the free table. For most operations,
278 * this is fine as we always consult nvk_descriptor_table::in_use when
279 * allocating. However, it does mean that there's nothing preventing our
280 * free table from growing larger than the memory we allocated for it. In
281 * the unlikely event that we end up with more entries than we can fit in
282 * the allocated space, compact the table to ensure that the new entry
283 * we're about to add fits.
284 */
285 if (table->free_count >= table->alloc)
286 nvk_descriptor_table_compact_free_table(table);
287 assert(table->free_count < table->alloc);
288
289 BITSET_CLEAR(table->in_use, index);
290 table->free_table[table->free_count++] = index;
291
292 simple_mtx_unlock(&table->mutex);
293 }
294