1 /*
2 * Copyright © 2022 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "anv_private.h"
25
26 /* Sparse binding handling.
27 *
28 * There is one main structure passed around all over this file:
29 *
30 * - struct anv_sparse_binding_data: every resource (VkBuffer or VkImage) has
31 * a pointer to an instance of this structure. It contains the virtual
32 * memory address (VMA) used by the binding operations (which is different
33 * from the VMA used by the anv_bo it's bound to) and the VMA range size. We
34  *   do not keep a record of our list of bindings (which ranges were bound
35 * to which buffers).
36 */
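
/* A rough sketch of how the helpers below fit together: resource creation
 * calls anv_init_sparse_bindings() to reserve a VMA and NULL-bind the whole
 * range, vkQueueBindSparse() ends up in anv_sparse_bind_buffer(),
 * anv_sparse_bind_image_opaque() or anv_sparse_bind_image_memory(), which
 * translate the API binds into anv_vm_bind operations collected in an
 * anv_sparse_submission that is then executed with anv_sparse_bind(), and
 * resource destruction calls anv_free_sparse_bindings() to unbind and
 * release the VMA.
 */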
37
38 __attribute__((format(printf, 1, 2)))
39 static void
40 sparse_debug(const char *format, ...)
41 {
42 if (!INTEL_DEBUG(DEBUG_SPARSE))
43 return;
44
45 va_list args;
46 va_start(args, format);
47 vfprintf(stderr, format, args);
48 va_end(args);
49 }
50
51 static void
52 dump_anv_vm_bind(struct anv_device *device,
53 const struct anv_vm_bind *bind)
54 {
55 sparse_debug("[%s] ", bind->op == ANV_VM_BIND ? " bind " : "unbind");
56
57 if (bind->bo)
58 sparse_debug("bo:%04u ", bind->bo->gem_handle);
59 else
60 sparse_debug("bo:---- ");
61 sparse_debug("address:%016"PRIx64" size:%08"PRIx64" "
62 "mem_offset:%08"PRIx64"\n",
63 bind->address, bind->size, bind->bo_offset);
64 }
65
66 static void
67 dump_anv_image(struct anv_image *i)
68 {
69 if (!INTEL_DEBUG(DEBUG_SPARSE))
70 return;
71
72 sparse_debug("anv_image:\n");
73 sparse_debug("- format: %d\n", i->vk.format);
74 sparse_debug("- extent: [%d, %d, %d]\n",
75 i->vk.extent.width, i->vk.extent.height, i->vk.extent.depth);
76 sparse_debug("- mip_levels: %d array_layers: %d samples: %d\n",
77 i->vk.mip_levels, i->vk.array_layers, i->vk.samples);
78 sparse_debug("- n_planes: %d\n", i->n_planes);
79 sparse_debug("- disjoint: %d\n", i->disjoint);
80 }
81
82 static void
83 dump_isl_surf(struct isl_surf *s)
84 {
85 if (!INTEL_DEBUG(DEBUG_SPARSE))
86 return;
87
88 sparse_debug("isl_surf:\n");
89
90 const char *dim_s = s->dim == ISL_SURF_DIM_1D ? "1D" :
91 s->dim == ISL_SURF_DIM_2D ? "2D" :
92 s->dim == ISL_SURF_DIM_3D ? "3D" :
93 "(ERROR)";
94 sparse_debug("- dim: %s\n", dim_s);
95 sparse_debug("- tiling: %d (%s)\n", s->tiling,
96 isl_tiling_to_name(s->tiling));
97 sparse_debug("- format: %s\n", isl_format_get_short_name(s->format));
98 sparse_debug("- image_alignment_el: [%d, %d, %d]\n",
99 s->image_alignment_el.w, s->image_alignment_el.h,
100 s->image_alignment_el.d);
101 sparse_debug("- logical_level0_px: [%d, %d, %d, %d]\n",
102 s->logical_level0_px.w,
103 s->logical_level0_px.h,
104 s->logical_level0_px.d,
105 s->logical_level0_px.a);
106 sparse_debug("- phys_level0_sa: [%d, %d, %d, %d]\n",
107 s->phys_level0_sa.w,
108 s->phys_level0_sa.h,
109 s->phys_level0_sa.d,
110 s->phys_level0_sa.a);
111 sparse_debug("- levels: %d samples: %d\n", s->levels, s->samples);
112 sparse_debug("- size_B: %"PRIu64" alignment_B: %u\n",
113 s->size_B, s->alignment_B);
114 sparse_debug("- row_pitch_B: %u\n", s->row_pitch_B);
115 sparse_debug("- array_pitch_el_rows: %u\n", s->array_pitch_el_rows);
116
117 const struct isl_format_layout *layout = isl_format_get_layout(s->format);
118 sparse_debug("- format layout:\n");
119 sparse_debug(" - format:%d bpb:%d bw:%d bh:%d bd:%d\n",
120 layout->format, layout->bpb, layout->bw, layout->bh,
121 layout->bd);
122
123 struct isl_tile_info tile_info;
124 isl_surf_get_tile_info(s, &tile_info);
125
126 sparse_debug("- tile info:\n");
127 sparse_debug(" - format_bpb: %d\n", tile_info.format_bpb);
128 sparse_debug(" - logical_extent_el: [%d, %d, %d, %d]\n",
129 tile_info.logical_extent_el.w,
130 tile_info.logical_extent_el.h,
131 tile_info.logical_extent_el.d,
132 tile_info.logical_extent_el.a);
133 sparse_debug(" - phys_extent_B: [%d, %d]\n",
134 tile_info.phys_extent_B.w,
135 tile_info.phys_extent_B.h);
136 }
137
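/* Helpers to convert between pixel and element units. For block-compressed
 * formats the isl_format_layout block dimensions (bw/bh/bd) are greater than
 * 1: e.g., with a hypothetical 4x4 compressed format (bw = bh = 4, bd = 1),
 * a 16x8 pixel extent corresponds to a 4x2 element extent.
 */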
138 static VkOffset3D
139 vk_offset3d_px_to_el(const VkOffset3D offset_px,
140 const struct isl_format_layout *layout)
141 {
142 return (VkOffset3D) {
143 .x = offset_px.x / layout->bw,
144 .y = offset_px.y / layout->bh,
145 .z = offset_px.z / layout->bd,
146 };
147 }
148
149 static VkOffset3D
150 vk_offset3d_el_to_px(const VkOffset3D offset_el,
151 const struct isl_format_layout *layout)
152 {
153 return (VkOffset3D) {
154 .x = offset_el.x * layout->bw,
155 .y = offset_el.y * layout->bh,
156 .z = offset_el.z * layout->bd,
157 };
158 }
159
160 static VkExtent3D
161 vk_extent3d_px_to_el(const VkExtent3D extent_px,
162 const struct isl_format_layout *layout)
163 {
164 return (VkExtent3D) {
165 .width = extent_px.width / layout->bw,
166 .height = extent_px.height / layout->bh,
167 .depth = extent_px.depth / layout->bd,
168 };
169 }
170
171 static VkExtent3D
172 vk_extent3d_el_to_px(const VkExtent3D extent_el,
173 const struct isl_format_layout *layout)
174 {
175 return (VkExtent3D) {
176 .width = extent_el.width * layout->bw,
177 .height = extent_el.height * layout->bh,
178 .depth = extent_el.depth * layout->bd,
179 };
180 }
181
182 static bool
183 isl_tiling_supports_standard_block_shapes(enum isl_tiling tiling)
184 {
185 return isl_tiling_is_64(tiling) ||
186 tiling == ISL_TILING_ICL_Ys ||
187 tiling == ISL_TILING_SKL_Ys;
188 }
189
190 static uint32_t
191 isl_calc_tile_size(struct isl_tile_info *tile_info)
192 {
193 uint32_t tile_size = tile_info->phys_extent_B.w *
194 tile_info->phys_extent_B.h;
195 assert(tile_size == 64 * 1024 || tile_size == 4096 || tile_size == 1);
196 return tile_size;
197 }
198
199 static const VkExtent3D block_shapes_2d_1sample[] = {
200 /* 8 bits: */ { .width = 256, .height = 256, .depth = 1 },
201 /* 16 bits: */ { .width = 256, .height = 128, .depth = 1 },
202 /* 32 bits: */ { .width = 128, .height = 128, .depth = 1 },
203 /* 64 bits: */ { .width = 128, .height = 64, .depth = 1 },
204 /* 128 bits: */ { .width = 64, .height = 64, .depth = 1 },
205 };
206 static const VkExtent3D block_shapes_3d_1sample[] = {
207 /* 8 bits: */ { .width = 64, .height = 32, .depth = 32 },
208 /* 16 bits: */ { .width = 32, .height = 32, .depth = 32 },
209 /* 32 bits: */ { .width = 32, .height = 32, .depth = 16 },
210 /* 64 bits: */ { .width = 32, .height = 16, .depth = 16 },
211 /* 128 bits: */ { .width = 16, .height = 16, .depth = 16 },
212 };
213 static const VkExtent3D block_shapes_2d_2samples[] = {
214 /* 8 bits: */ { .width = 128, .height = 256, .depth = 1 },
215 /* 16 bits: */ { .width = 128, .height = 128, .depth = 1 },
216 /* 32 bits: */ { .width = 64, .height = 128, .depth = 1 },
217 /* 64 bits: */ { .width = 64, .height = 64, .depth = 1 },
218 /* 128 bits: */ { .width = 32, .height = 64, .depth = 1 },
219 };
220 static const VkExtent3D block_shapes_2d_4samples[] = {
221 /* 8 bits: */ { .width = 128, .height = 128, .depth = 1 },
222 /* 16 bits: */ { .width = 128, .height = 64, .depth = 1 },
223 /* 32 bits: */ { .width = 64, .height = 64, .depth = 1 },
224 /* 64 bits: */ { .width = 64, .height = 32, .depth = 1 },
225 /* 128 bits: */ { .width = 32, .height = 32, .depth = 1 },
226 };
227 static const VkExtent3D block_shapes_2d_8samples[] = {
228 /* 8 bits: */ { .width = 64, .height = 128, .depth = 1 },
229 /* 16 bits: */ { .width = 64, .height = 64, .depth = 1 },
230 /* 32 bits: */ { .width = 32, .height = 64, .depth = 1 },
231 /* 64 bits: */ { .width = 32, .height = 32, .depth = 1 },
232 /* 128 bits: */ { .width = 16, .height = 32, .depth = 1 },
233 };
234 static const VkExtent3D block_shapes_2d_16samples[] = {
235 /* 8 bits: */ { .width = 64, .height = 64, .depth = 1 },
236 /* 16 bits: */ { .width = 64, .height = 32, .depth = 1 },
237 /* 32 bits: */ { .width = 32, .height = 32, .depth = 1 },
238 /* 64 bits: */ { .width = 32, .height = 16, .depth = 1 },
239 /* 128 bits: */ { .width = 16, .height = 16, .depth = 1 },
240 };
241
242 static VkExtent3D
243 anv_sparse_get_standard_image_block_shape(enum isl_format format,
244 VkImageType image_type,
245 VkSampleCountFlagBits samples,
246 uint16_t texel_size)
247 {
248 const struct isl_format_layout *layout = isl_format_get_layout(format);
249 VkExtent3D block_shape = { .width = 0, .height = 0, .depth = 0 };
250
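   /* texel_size is the format's bpb, so the tables above are indexed as
    * ffs(8) - 4 = 0, ffs(16) - 4 = 1, ffs(32) - 4 = 2, ffs(64) - 4 = 3 and
    * ffs(128) - 4 = 4.
    */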
251 int table_idx = ffs(texel_size) - 4;
252
253 switch (samples) {
254 case VK_SAMPLE_COUNT_1_BIT:
255 switch (image_type) {
256 case VK_IMAGE_TYPE_1D:
257 /* 1D images don't have a standard block format. */
258 assert(false);
259 break;
260 case VK_IMAGE_TYPE_2D:
261 block_shape = block_shapes_2d_1sample[table_idx];
262 break;
263 case VK_IMAGE_TYPE_3D:
264 block_shape = block_shapes_3d_1sample[table_idx];
265 break;
266 default:
267 fprintf(stderr, "unexpected image_type %d\n", image_type);
268 assert(false);
269 }
270 break;
271 case VK_SAMPLE_COUNT_2_BIT:
272 block_shape = block_shapes_2d_2samples[table_idx];
273 break;
274 case VK_SAMPLE_COUNT_4_BIT:
275 block_shape = block_shapes_2d_4samples[table_idx];
276 break;
277 case VK_SAMPLE_COUNT_8_BIT:
278 block_shape = block_shapes_2d_8samples[table_idx];
279 break;
280 case VK_SAMPLE_COUNT_16_BIT:
281 block_shape = block_shapes_2d_16samples[table_idx];
282 break;
283 default:
284 fprintf(stderr, "unexpected sample count: %d\n", samples);
285 assert(false);
286 }
287
288 return vk_extent3d_el_to_px(block_shape, layout);
289 }
290
291 /* Adds "bind_op" to the list in "submit", first checking whether we can
292  * simply extend the last operation instead.
293 */
294 static VkResult
295 anv_sparse_submission_add(struct anv_device *device,
296 struct anv_sparse_submission *submit,
297 struct anv_vm_bind *bind_op)
298 {
299 struct anv_vm_bind *prev_bind = submit->binds_len == 0 ? NULL :
300 &submit->binds[submit->binds_len - 1];
301
302 if (prev_bind &&
303 bind_op->op == prev_bind->op &&
304 bind_op->bo == prev_bind->bo &&
305 bind_op->address == prev_bind->address + prev_bind->size &&
306 (bind_op->bo_offset == prev_bind->bo_offset + prev_bind->size ||
307 prev_bind->bo == NULL)) {
308 prev_bind->size += bind_op->size;
309 return VK_SUCCESS;
310 }
311
312 if (submit->binds_len < submit->binds_capacity) {
313 submit->binds[submit->binds_len++] = *bind_op;
314 return VK_SUCCESS;
315 }
316
317 int new_capacity = MAX2(32, submit->binds_capacity * 2);
318 struct anv_vm_bind *new_binds =
319 vk_realloc(&device->vk.alloc, submit->binds,
320 new_capacity * sizeof(*new_binds), 8,
321 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
322 if (!new_binds)
323 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
324
325 new_binds[submit->binds_len] = *bind_op;
326
327 submit->binds = new_binds;
328 submit->binds_len++;
329 submit->binds_capacity = new_capacity;
330
331 return VK_SUCCESS;
332 }
333
334 /* We really want to try to have all the page tables on as few BOs as possible
335 * to benefit from cache locality and to keep the i915.ko relocation lists
336 * small. On the other hand, we don't want to waste memory on unused space.
337 */
338 #define ANV_TRTT_PAGE_TABLE_BO_SIZE (2 * 1024 * 1024)
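/* Each page table takes 4KB (see trtt_get_page_table_bo() below), so a
 * single 2MB BO holds 512 of them.
 */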
339
340 static VkResult
341 trtt_make_page_table_bo(struct anv_device *device, struct anv_bo **bo)
342 {
343 VkResult result;
344 struct anv_trtt *trtt = &device->trtt;
345
346 result = anv_device_alloc_bo(device, "trtt-page-table",
347 ANV_TRTT_PAGE_TABLE_BO_SIZE,
348 ANV_BO_ALLOC_INTERNAL,
349 0 /* explicit_address */, bo);
350 if (result != VK_SUCCESS)
351 return result;
352
353 if (trtt->num_page_table_bos < trtt->page_table_bos_capacity) {
354 trtt->page_table_bos[trtt->num_page_table_bos++] = *bo;
355 } else {
356
357 int new_capacity = MAX2(8, trtt->page_table_bos_capacity * 2);
358 struct anv_bo **new_page_table_bos =
359 vk_realloc(&device->vk.alloc, trtt->page_table_bos,
360 new_capacity * sizeof(*trtt->page_table_bos), 8,
361 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
362 if (!new_page_table_bos) {
363 anv_device_release_bo(device, *bo);
364 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
365 }
366
367 new_page_table_bos[trtt->num_page_table_bos] = *bo;
368
369 trtt->page_table_bos = new_page_table_bos;
370 trtt->page_table_bos_capacity = new_capacity;
371 trtt->num_page_table_bos++;
372 }
373
374 trtt->cur_page_table_bo = *bo;
375 trtt->next_page_table_bo_offset = 0;
376
377 sparse_debug("new number of page table BOs: %d\n",
378 trtt->num_page_table_bos);
379
380 return VK_SUCCESS;
381 }
382
383 static VkResult
384 trtt_get_page_table_bo(struct anv_device *device, struct anv_bo **bo,
385 uint64_t *bo_addr)
386 {
387 struct anv_trtt *trtt = &device->trtt;
388 VkResult result;
389
390 if (!trtt->cur_page_table_bo) {
391 result = trtt_make_page_table_bo(device, bo);
392 if (result != VK_SUCCESS)
393 return result;
394 }
395
396 *bo = trtt->cur_page_table_bo;
397 *bo_addr = trtt->cur_page_table_bo->offset +
398 trtt->next_page_table_bo_offset;
399
400 trtt->next_page_table_bo_offset += 4096;
401 if (trtt->next_page_table_bo_offset >= ANV_TRTT_PAGE_TABLE_BO_SIZE)
402 trtt->cur_page_table_bo = NULL;
403
404 return VK_SUCCESS;
405 }
406
407 static VkResult
408 anv_trtt_init_queues_state(struct anv_device *device)
409 {
410 struct anv_trtt *trtt = &device->trtt;
411
412 struct anv_bo *l3_bo;
413 VkResult result = trtt_get_page_table_bo(device, &l3_bo, &trtt->l3_addr);
414 if (result != VK_SUCCESS)
415 return result;
416
417 trtt->l3_mirror = vk_zalloc(&device->vk.alloc, 4096, 8,
418 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
419 if (!trtt->l3_mirror)
420 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
421
422 /* L3 has 512 entries, so we can have up to 512 L2 tables. */
423 trtt->l2_mirror = vk_zalloc(&device->vk.alloc, 512 * 4096, 8,
424 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
425 if (!trtt->l2_mirror) {
426 vk_free(&device->vk.alloc, trtt->l3_mirror);
427 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
428 }
429
430 struct anv_async_submit submits[device->queue_count];
431 int submits_used = 0;
432 for (uint32_t i = 0; i < device->queue_count; i++) {
433 struct anv_queue *q = &device->queues[i];
434
435 result = anv_async_submit_init(&submits[submits_used], q,
436 &device->batch_bo_pool, false, true);
437 if (result != VK_SUCCESS)
438 break;
439
440 struct anv_async_submit *submit = &submits[submits_used++];
441
442 result = anv_genX(device->info, init_trtt_context_state)(submit);
443 if (result != VK_SUCCESS) {
444 anv_async_submit_fini(submit);
445 submits_used--;
446 break;
447 }
448
449 anv_genX(device->info, async_submit_end)(submit);
450
451 result = device->kmd_backend->queue_exec_async(submit, 0, NULL, 1,
452 &submit->signal);
453 if (result != VK_SUCCESS) {
454 anv_async_submit_fini(submit);
455 submits_used--;
456 break;
457 }
458 }
459
460 for (uint32_t i = 0; i < submits_used; i++) {
461 anv_async_submit_wait(&submits[i]);
462 anv_async_submit_fini(&submits[i]);
463 }
464
465 return result;
466 }
467
468 /* For L3 and L2 pages, null and invalid entries are indicated by bits 1 and 0
469 * respectively. For L1 entries, the hardware compares the addresses against
470 * what we program to the GFX_TRTT_NULL and GFX_TRTT_INVAL registers.
471 */
472 #define ANV_TRTT_L3L2_NULL_ENTRY (1 << 1)
473 #define ANV_TRTT_L3L2_INVALID_ENTRY (1 << 0)
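/* When an L3 or L2 entry is valid it simply holds the GPU address of the
 * next-level table, as can be seen in anv_trtt_bind_add() below.
 */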
474
475 static void
476 anv_trtt_bind_list_add_entry(struct anv_trtt_bind *binds, uint32_t *binds_len,
477 uint64_t pte_addr, uint64_t entry_addr)
478 {
479 binds[*binds_len] = (struct anv_trtt_bind) {
480 .pte_addr = pte_addr,
481 .entry_addr = entry_addr,
482 };
483 (*binds_len)++;
484 }
485
486 /* Adds elements to the anv_trtt_bind structs passed. This doesn't write the
487 * entries to the HW yet.
488 */
489 static VkResult
490 anv_trtt_bind_add(struct anv_device *device,
491 uint64_t trtt_addr, uint64_t dest_addr,
492 struct anv_trtt_bind *l3l2_binds,
493 uint32_t *n_l3l2_binds,
494 struct anv_trtt_bind *l1_binds,
495 uint32_t *n_l1_binds)
496 {
497 VkResult result = VK_SUCCESS;
498 struct anv_trtt *trtt = &device->trtt;
499 bool is_null_bind = dest_addr == ANV_TRTT_L1_NULL_TILE_VAL;
500
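   /* A TR-TT virtual address decomposes as: bits [43:35] index the L3 table
    * (512 entries), bits [34:26] index the L2 table (512 entries), bits
    * [25:16] index the L1 table (1024 entries) and bits [15:0] are the
    * offset inside a 64KB tile.
    */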
501 int l3_index = (trtt_addr >> 35) & 0x1FF;
502 int l2_index = (trtt_addr >> 26) & 0x1FF;
503 int l1_index = (trtt_addr >> 16) & 0x3FF;
504
505 uint64_t l2_addr = trtt->l3_mirror[l3_index];
506 if (l2_addr == ANV_TRTT_L3L2_NULL_ENTRY && is_null_bind) {
507 return VK_SUCCESS;
508 } else if (l2_addr == 0 || l2_addr == ANV_TRTT_L3L2_NULL_ENTRY) {
509 if (is_null_bind) {
510 trtt->l3_mirror[l3_index] = ANV_TRTT_L3L2_NULL_ENTRY;
511
512 anv_trtt_bind_list_add_entry(l3l2_binds, n_l3l2_binds,
513 trtt->l3_addr + l3_index *
514 sizeof(uint64_t),
515 ANV_TRTT_L3L2_NULL_ENTRY);
516
517 return VK_SUCCESS;
518 }
519
520 struct anv_bo *l2_bo;
521 result = trtt_get_page_table_bo(device, &l2_bo, &l2_addr);
522 if (result != VK_SUCCESS)
523 return result;
524
525 trtt->l3_mirror[l3_index] = l2_addr;
526
527 anv_trtt_bind_list_add_entry(l3l2_binds, n_l3l2_binds,
528 trtt->l3_addr + l3_index *
529 sizeof(uint64_t), l2_addr);
530 }
531 assert(l2_addr != 0 && l2_addr != ANV_TRTT_L3L2_NULL_ENTRY);
532
533 /* The first page in the l2_mirror corresponds to l3_index=0 and so on. */
534 uint64_t l1_addr = trtt->l2_mirror[l3_index * 512 + l2_index];
535 if (l1_addr == ANV_TRTT_L3L2_NULL_ENTRY && is_null_bind) {
536 return VK_SUCCESS;
537 } else if (l1_addr == 0 || l1_addr == ANV_TRTT_L3L2_NULL_ENTRY) {
538 if (is_null_bind) {
539 trtt->l2_mirror[l3_index * 512 + l2_index] =
540 ANV_TRTT_L3L2_NULL_ENTRY;
541
542 anv_trtt_bind_list_add_entry(l3l2_binds, n_l3l2_binds,
543 l2_addr + l2_index * sizeof(uint64_t),
544 ANV_TRTT_L3L2_NULL_ENTRY);
545
546 return VK_SUCCESS;
547 }
548
549 struct anv_bo *l1_bo;
550 result = trtt_get_page_table_bo(device, &l1_bo, &l1_addr);
551 if (result != VK_SUCCESS)
552 return result;
553
554 trtt->l2_mirror[l3_index * 512 + l2_index] = l1_addr;
555
556 anv_trtt_bind_list_add_entry(l3l2_binds, n_l3l2_binds,
557 l2_addr + l2_index * sizeof(uint64_t),
558 l1_addr);
559 }
560 assert(l1_addr != 0 && l1_addr != ANV_TRTT_L3L2_NULL_ENTRY);
561
562 anv_trtt_bind_list_add_entry(l1_binds, n_l1_binds,
563 l1_addr + l1_index * sizeof(uint32_t),
564 dest_addr);
565
566 return VK_SUCCESS;
567 }
568
569 VkResult
570 anv_sparse_trtt_garbage_collect_batches(struct anv_device *device,
571 bool wait_completion)
572 {
573 struct anv_trtt *trtt = &device->trtt;
574
575 uint64_t last_value;
576 if (!wait_completion) {
577 VkResult result =
578 vk_sync_get_value(&device->vk, trtt->timeline, &last_value);
579 if (result != VK_SUCCESS)
580 return result;
581 } else {
582 last_value = trtt->timeline_val;
583 }
584
585 list_for_each_entry_safe(struct anv_trtt_submission, submit,
586 &trtt->in_flight_batches, link) {
587 if (submit->base.signal.signal_value <= last_value) {
588 list_del(&submit->link);
589 anv_async_submit_fini(&submit->base);
590 vk_free(&device->vk.alloc, submit);
591 continue;
592 }
593
594 if (!wait_completion)
595 break;
596
597 VkResult result = vk_sync_wait(
598 &device->vk,
599 submit->base.signal.sync,
600 submit->base.signal.signal_value,
601 VK_SYNC_WAIT_COMPLETE,
602 os_time_get_absolute_timeout(OS_TIMEOUT_INFINITE));
603 if (result == VK_SUCCESS) {
604 list_del(&submit->link);
605 anv_async_submit_fini(&submit->base);
606 vk_free(&device->vk.alloc, submit);
607 continue;
608 }
609
610 /* If the wait failed but the caller wanted completion, return the
611 * error.
612 */
613 return result;
614 }
615
616 return VK_SUCCESS;
617 }
618
619 static VkResult
620 anv_sparse_bind_trtt(struct anv_device *device,
621 struct anv_sparse_submission *sparse_submit)
622 {
623 struct anv_trtt *trtt = &device->trtt;
624 VkResult result;
625
626 /* TR-TT submission needs a queue even when the API entry point doesn't
627     * provide one, such as resource creation. We pick this queue from the
628     * user-created queues at init_device_state() under anv_CreateDevice.
629 *
630 * It is technically possible for the user to create sparse resources even
631 * when they don't have a sparse queue: they won't be able to bind the
632 * resource but they should still be able to use the resource and rely on
633 * its unbound behavior. We haven't spotted any real world application or
634 * even test suite that exercises this behavior.
635 *
636 * For now let's just print an error message and return, which means that
637 * resource creation will succeed but the behavior will be undefined if the
638 * resource is used, which goes against our claim that we support the
639 * sparseResidencyNonResidentStrict property.
640 *
641 * TODO: be fully spec-compliant here. Maybe have a device-internal queue
642 * independent of the application's queues for the TR-TT operations.
643 */
644 if (!trtt->queue) {
645 static bool warned = false;
646 if (unlikely(!warned)) {
647 fprintf(stderr, "FIXME: application has created a sparse resource "
648 "but no queues capable of binding sparse resources were "
649 "created. Using these resources will result in undefined "
650 "behavior.\n");
651 warned = true;
652 }
653 return VK_SUCCESS;
654 }
655 if (!sparse_submit->queue)
656 sparse_submit->queue = trtt->queue;
657
658 struct anv_trtt_submission *submit =
659 vk_zalloc(&device->vk.alloc, sizeof(*submit), 8,
660 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
661 if (submit == NULL)
662 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
663
664 result = anv_async_submit_init(&submit->base, sparse_submit->queue,
665 &device->batch_bo_pool,
666 false, false);
667 if (result != VK_SUCCESS)
668 goto error_async;
669
670 simple_mtx_lock(&trtt->mutex);
671
672 anv_sparse_trtt_garbage_collect_batches(device, false);
673
674 submit->base.signal = (struct vk_sync_signal) {
675 .sync = trtt->timeline,
676 .signal_value = ++trtt->timeline_val,
677 };
678
679 /* If the TRTT L3 table was never set, initialize it as part of this
680 * submission.
681 */
682 if (!trtt->l3_addr) {
683 result = anv_trtt_init_queues_state(device);
684 if (result != VK_SUCCESS)
685 goto error_add_bind;
686 }
687 assert(trtt->l3_addr);
688
689    /* These capacities are conservative estimates. For L1 binds the
690 * number will match exactly unless we skip NULL binds due to L2 already
691 * being NULL. For L3/L2 things are harder to estimate, but the resulting
692 * numbers are so small that a little overestimation won't hurt.
693 *
694 * We have assertions below to catch estimation errors.
695 */
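   /* As an illustration: a single 64MB bind spans 1024 64KB pages, so it
    * adds 1024 to l1_binds_capacity and at most (1024 / 1024 + 1) * 2 = 4 to
    * l3l2_binds_capacity.
    */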
696 int l3l2_binds_capacity = 1;
697 int l1_binds_capacity = 0;
698 for (int b = 0; b < sparse_submit->binds_len; b++) {
699 assert(sparse_submit->binds[b].size % (64 * 1024) == 0);
700 int pages = sparse_submit->binds[b].size / (64 * 1024);
701 l1_binds_capacity += pages;
702 l3l2_binds_capacity += (pages / 1024 + 1) * 2;
703 }
704
705    /* Turn a series of virtual address maps into a list of L3/L2/L1 TRTT page
706 * table updates.
707 */
708 STACK_ARRAY(struct anv_trtt_bind, l3l2_binds, l3l2_binds_capacity);
709 STACK_ARRAY(struct anv_trtt_bind, l1_binds, l1_binds_capacity);
710 uint32_t n_l3l2_binds = 0, n_l1_binds = 0;
711 for (int b = 0; b < sparse_submit->binds_len && result == VK_SUCCESS; b++) {
712 struct anv_vm_bind *vm_bind = &sparse_submit->binds[b];
713 for (size_t i = 0; i < vm_bind->size && result == VK_SUCCESS; i += 64 * 1024) {
714 uint64_t trtt_addr = vm_bind->address + i;
715 uint64_t dest_addr =
716 (vm_bind->op == ANV_VM_BIND && vm_bind->bo) ?
717 vm_bind->bo->offset + vm_bind->bo_offset + i :
718 ANV_TRTT_L1_NULL_TILE_VAL;
719
720 result = anv_trtt_bind_add(device, trtt_addr, dest_addr,
721 l3l2_binds, &n_l3l2_binds,
722 l1_binds, &n_l1_binds);
723 }
724 }
725
726 assert(n_l3l2_binds <= l3l2_binds_capacity);
727 assert(n_l1_binds <= l1_binds_capacity);
728
729 /* Convert the L3/L2/L1 TRTT page table updates in anv_trtt_bind elements
730 * into MI commands.
731 */
732 if (result == VK_SUCCESS) {
733 sparse_debug("trtt_binds: num_vm_binds:%02d l3l2:%04d l1:%04d\n",
734 sparse_submit->binds_len, n_l3l2_binds, n_l1_binds);
735
736 if (n_l3l2_binds || n_l1_binds) {
737 anv_genX(device->info, write_trtt_entries)(
738 &submit->base, l3l2_binds, n_l3l2_binds, l1_binds, n_l1_binds);
739 }
740 }
741
742 STACK_ARRAY_FINISH(l1_binds);
743 STACK_ARRAY_FINISH(l3l2_binds);
744
745 anv_genX(device->info, async_submit_end)(&submit->base);
746
747 if (submit->base.batch.status != VK_SUCCESS) {
748 result = submit->base.batch.status;
749 goto error_add_bind;
750 }
751
752 /* Add all the BOs backing TRTT page tables to the reloc list.
753 *
754 * TODO: we could narrow down the list by using anv_address structures in
755 * anv_trtt_bind for the pte_addr.
756 */
757 if (device->physical->uses_relocs) {
758 for (int i = 0; i < trtt->num_page_table_bos; i++) {
759 result = anv_reloc_list_add_bo(&submit->base.relocs,
760 trtt->page_table_bos[i]);
761 if (result != VK_SUCCESS)
762 goto error_add_bind;
763 }
764 }
765
766 result =
767 device->kmd_backend->queue_exec_async(&submit->base,
768 sparse_submit->wait_count,
769 sparse_submit->waits,
770 sparse_submit->signal_count,
771 sparse_submit->signals);
772 if (result != VK_SUCCESS)
773 goto error_add_bind;
774
775
776 list_addtail(&submit->link, &trtt->in_flight_batches);
777
778 simple_mtx_unlock(&trtt->mutex);
779
780 ANV_RMV(vm_binds, device, sparse_submit->binds, sparse_submit->binds_len);
781
782 return VK_SUCCESS;
783
784 error_add_bind:
785 simple_mtx_unlock(&trtt->mutex);
786 anv_async_submit_fini(&submit->base);
787 error_async:
788 vk_free(&device->vk.alloc, submit);
789 return result;
790 }
791
792 static VkResult
793 anv_sparse_bind_vm_bind(struct anv_device *device,
794 struct anv_sparse_submission *submit)
795 {
796 struct anv_queue *queue = submit->queue;
797
798 VkResult result = device->kmd_backend->vm_bind(device, submit,
799 ANV_VM_BIND_FLAG_NONE);
800 if (!queue) {
801 assert(submit->wait_count == 0 && submit->signal_count == 0 &&
802 submit->binds_len == 1);
803 return result;
804 }
805
806 if (result == VK_ERROR_OUT_OF_HOST_MEMORY) {
807 /* If we get this, the system is under memory pressure. First we
808        * manually wait for all our dependency syncobjs, hoping that some memory
809        * will be released while we wait. Then we try to issue each bind
810        * operation in its own ioctl, as that requires less kernel memory and so
811        * we may be able to move things forward, although slowly, while also
812        * waiting for each operation to complete before issuing the next.
813 * Performance isn't a concern at this point: we're just trying to move
814 * progress forward without crashing until whatever is eating too much
815 * memory goes away.
816 */
817
818 result = vk_sync_wait_many(&device->vk, submit->wait_count,
819 submit->waits, VK_SYNC_WAIT_COMPLETE,
820 INT64_MAX);
821 if (result != VK_SUCCESS)
822 return vk_queue_set_lost(&queue->vk, "vk_sync_wait_many failed");
823
824 struct vk_sync *sync;
825 result = vk_sync_create(&device->vk,
826 &device->physical->sync_syncobj_type,
827 VK_SYNC_IS_TIMELINE, 0 /* initial_value */,
828 &sync);
829 if (result != VK_SUCCESS)
830 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
831
832 for (int b = 0; b < submit->binds_len; b++) {
833 struct vk_sync_signal sync_signal = {
834 .sync = sync,
835 .signal_value = b + 1,
836 };
837 struct anv_sparse_submission s = {
838 .queue = submit->queue,
839 .binds = &submit->binds[b],
840 .binds_len = 1,
841 .binds_capacity = 1,
842 .wait_count = 0,
843 .signal_count = 1,
844 .waits = NULL,
845 .signals = &sync_signal,
846 };
847 result = device->kmd_backend->vm_bind(device, &s,
848 ANV_VM_BIND_FLAG_NONE);
849 if (result != VK_SUCCESS) {
850 vk_sync_destroy(&device->vk, sync);
851 return vk_error(device, result); /* Well, at least we tried... */
852 }
853
854 result = vk_sync_wait(&device->vk, sync, sync_signal.signal_value,
855 VK_SYNC_WAIT_COMPLETE, UINT64_MAX);
856 if (result != VK_SUCCESS) {
857 vk_sync_destroy(&device->vk, sync);
858 return vk_queue_set_lost(&queue->vk, "vk_sync_wait failed");
859 }
860 }
861
862 vk_sync_destroy(&device->vk, sync);
863
864 for (uint32_t i = 0; i < submit->signal_count; i++) {
865 struct vk_sync_signal *s = &submit->signals[i];
866 result = vk_sync_signal(&device->vk, s->sync, s->signal_value);
867 if (result != VK_SUCCESS)
868 return vk_queue_set_lost(&queue->vk, "vk_sync_signal failed");
869 }
870 }
871
872 return VK_SUCCESS;
873 }
874
875 VkResult
876 anv_sparse_bind(struct anv_device *device,
877 struct anv_sparse_submission *submit)
878 {
879 if (INTEL_DEBUG(DEBUG_SPARSE)) {
880 for (int b = 0; b < submit->binds_len; b++)
881 dump_anv_vm_bind(device, &submit->binds[b]);
882 }
883
884 return device->physical->sparse_type == ANV_SPARSE_TYPE_TRTT ?
885 anv_sparse_bind_trtt(device, submit) :
886 anv_sparse_bind_vm_bind(device, submit);
887 }
888
889 VkResult
890 anv_init_sparse_bindings(struct anv_device *device,
891 uint64_t size_,
892 struct anv_sparse_binding_data *sparse,
893 enum anv_bo_alloc_flags alloc_flags,
894 uint64_t client_address,
895 struct anv_address *out_address)
896 {
897 uint64_t size = align64(size_, ANV_SPARSE_BLOCK_SIZE);
898
899 if (device->physical->sparse_type == ANV_SPARSE_TYPE_TRTT)
900 alloc_flags |= ANV_BO_ALLOC_TRTT;
901
902 sparse->address = anv_vma_alloc(device, size, ANV_SPARSE_BLOCK_SIZE,
903 alloc_flags,
904 intel_48b_address(client_address),
905 &sparse->vma_heap);
906 sparse->size = size;
907
908 out_address->bo = NULL;
909 out_address->offset = sparse->address;
910
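   /* NULL-bind the whole range right away so that accesses to regions that
    * were never bound to real memory still have well-defined behavior.
    */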
911 struct anv_vm_bind bind = {
912 .bo = NULL, /* That's a NULL binding. */
913 .address = sparse->address,
914 .bo_offset = 0,
915 .size = size,
916 .op = ANV_VM_BIND,
917 };
918 struct anv_sparse_submission submit = {
919 .queue = NULL,
920 .binds = &bind,
921 .binds_len = 1,
922 .binds_capacity = 1,
923 .wait_count = 0,
924 .signal_count = 0,
925 };
926 VkResult res = anv_sparse_bind(device, &submit);
927 if (res != VK_SUCCESS) {
928 anv_vma_free(device, sparse->vma_heap, sparse->address, sparse->size);
929 return res;
930 }
931
932 p_atomic_inc(&device->num_sparse_resources);
933 return VK_SUCCESS;
934 }
935
936 void
937 anv_free_sparse_bindings(struct anv_device *device,
938 struct anv_sparse_binding_data *sparse)
939 {
940 if (!sparse->address)
941 return;
942
943 sparse_debug("%s: address:0x%016"PRIx64" size:0x%08"PRIx64"\n",
944 __func__, sparse->address, sparse->size);
945
946 p_atomic_dec(&device->num_sparse_resources);
947
948 struct anv_vm_bind unbind = {
949 .bo = 0,
950 .address = sparse->address,
951 .bo_offset = 0,
952 .size = sparse->size,
953 .op = ANV_VM_UNBIND,
954 };
955 struct anv_sparse_submission submit = {
956 .queue = NULL,
957 .binds = &unbind,
958 .binds_len = 1,
959 .binds_capacity = 1,
960 .wait_count = 0,
961 .signal_count = 0,
962 };
963 VkResult res = anv_sparse_bind(device, &submit);
964
965 /* Our callers don't have a way to signal failure to the upper layers, so
966 * just keep the vma if we fail to unbind it. Still, let's have an
967 * assertion because this really shouldn't be happening.
968 */
969 assert(res == VK_SUCCESS);
970 if (res != VK_SUCCESS)
971 return;
972
973 anv_vma_free(device, sparse->vma_heap, sparse->address, sparse->size);
974 }
975
976 static VkExtent3D
977 anv_sparse_calc_block_shape(struct anv_physical_device *pdevice,
978 struct isl_surf *surf,
979 const struct isl_tile_info *tile_info)
980 {
981 const struct isl_format_layout *layout =
982 isl_format_get_layout(surf->format);
983
984 VkExtent3D block_shape_el = {
985 .width = tile_info->logical_extent_el.width,
986 .height = tile_info->logical_extent_el.height,
987 .depth = tile_info->logical_extent_el.depth,
988 };
989 VkExtent3D block_shape_px = vk_extent3d_el_to_px(block_shape_el, layout);
990
991 assert(surf->tiling != ISL_TILING_LINEAR);
992
993 return block_shape_px;
994 }
995
996 VkSparseImageFormatProperties
997 anv_sparse_calc_image_format_properties(struct anv_physical_device *pdevice,
998 VkImageAspectFlags aspect,
999 VkImageType vk_image_type,
1000 VkSampleCountFlagBits vk_samples,
1001 struct isl_surf *surf)
1002 {
1003 const struct isl_format_layout *isl_layout =
1004 isl_format_get_layout(surf->format);
1005 struct isl_tile_info tile_info;
1006 isl_surf_get_tile_info(surf, &tile_info);
1007 const int bpb = isl_layout->bpb;
1008    assert(bpb == 8 || bpb == 16 || bpb == 32 || bpb == 64 || bpb == 128);
1009
1010 VkExtent3D granularity = anv_sparse_calc_block_shape(pdevice, surf,
1011 &tile_info);
1012 bool is_standard = false;
1013 bool is_known_nonstandard_format = false;
1014
1015 /* We shouldn't be able to reach this function with a 1D image. */
1016 assert(vk_image_type != VK_IMAGE_TYPE_1D);
1017
1018 VkExtent3D std_shape =
1019 anv_sparse_get_standard_image_block_shape(surf->format,
1020 vk_image_type, vk_samples,
1021 bpb);
1022 /* YUV formats don't work with Tile64, which is required if we want to
1023 * claim standard block shapes. The spec requires us to support all
1024 * non-compressed color formats that non-sparse supports, so we can't just
1025     * say YUV formats are not supported by Sparse. So we end up supporting this
1026 * format and anv_sparse_calc_miptail_properties() will say that everything
1027 * is part of the miptail.
1028 *
1029 * For more details on the hardware restriction, please check
1030 * isl_gfx125_filter_tiling().
1031 */
1032 if (pdevice->info.verx10 >= 125 && isl_format_is_yuv(surf->format))
1033 is_known_nonstandard_format = true;
1034
1035 /* The standard block shapes (and by extension, the tiling formats they
1036 * require) are simply incompatible with getting a 2D view of a 3D image.
1037 */
1038 if (surf->usage & ISL_SURF_USAGE_2D_3D_COMPATIBLE_BIT)
1039 is_known_nonstandard_format = true;
1040
1041 is_standard = granularity.width == std_shape.width &&
1042 granularity.height == std_shape.height &&
1043 granularity.depth == std_shape.depth;
1044
1045 /* TODO: dEQP seems to care about the block shapes being standard even for
1046 * the cases where is_known_nonstandard_format is true. Luckily as of today
1047 * all of those cases are NotSupported but sooner or later we may end up
1048 * getting a failure.
1049 * Notice that in practice we report these cases as having the mip tail
1050 * starting on mip level 0, so the reported block shapes are irrelevant
1051 * since non-opaque binds are not supported. Still, dEQP seems to care.
1052 */
1053 assert(is_standard || is_known_nonstandard_format);
1054 assert(!(is_standard && is_known_nonstandard_format));
1055
1056 bool wrong_block_size = isl_calc_tile_size(&tile_info) !=
1057 ANV_SPARSE_BLOCK_SIZE;
1058
1059 return (VkSparseImageFormatProperties) {
1060 .aspectMask = aspect,
1061 .imageGranularity = granularity,
1062 .flags = ((is_standard || is_known_nonstandard_format) ? 0 :
1063 VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT) |
1064 (wrong_block_size ? VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT :
1065 0),
1066 };
1067 }
1068
1069 /* The miptail is supposed to be this region where the tiniest mip levels
1070 * are squished together in one single page, which should save us some memory.
1071  * It's a feature our hardware supports on certain tiling formats - the
1072  * ones we always want to use for sparse resources.
1073 *
1074 * For sparse, the main feature of the miptail is that it only supports opaque
1075  * binds, so you either bind the whole miptail or you bind nothing at all;
1076  * there are no subresources inside it to bind separately. While the idea is
1077 * that the miptail as reported by sparse should match what our hardware does,
1078 * in practice we can say in our sparse functions that certain mip levels are
1079  * part of the miptail while from the point of view of our hardware they
1080 * aren't.
1081 *
1082 * If we detect we're using the sparse-friendly tiling formats and ISL
1083 * supports miptails for them, we can just trust the miptail level set by ISL
1084 * and things can proceed as The Spec intended.
1085 *
1086 * However, if that's not the case, we have to go on a best-effort policy. We
1087 * could simply declare that every mip level is part of the miptail and be
1088 * done, but since that kinda defeats the purpose of Sparse we try to find
1089 * what level we really should be reporting as the first miptail level based
1090 * on the alignments of the surface subresources.
1091 */
1092 void
1093 anv_sparse_calc_miptail_properties(struct anv_device *device,
1094 struct anv_image *image,
1095 VkImageAspectFlags vk_aspect,
1096 uint32_t *imageMipTailFirstLod,
1097 VkDeviceSize *imageMipTailSize,
1098 VkDeviceSize *imageMipTailOffset,
1099 VkDeviceSize *imageMipTailStride)
1100 {
1101 const uint32_t plane = anv_image_aspect_to_plane(image, vk_aspect);
1102 struct isl_surf *surf = &image->planes[plane].primary_surface.isl;
1103 uint64_t binding_plane_offset =
1104 image->planes[plane].primary_surface.memory_range.offset;
1105 struct isl_tile_info tile_info;
1106 isl_surf_get_tile_info(surf, &tile_info);
1107 uint64_t layer1_offset;
1108 uint32_t x_off, y_off;
1109
1110 /* Treat the whole thing as a single miptail. We should have already
1111 * reported this image as VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT.
1112 *
1113 * In theory we could try to make ISL massage the alignments so that we
1114 * could at least claim mip level 0 to be not part of the miptail, but
1115 * that could end up wasting a lot of memory, so it's better to do
1116 * nothing and focus our efforts into making things use the appropriate
1117 * tiling formats that give us the standard block shapes.
1118 */
1119 if (isl_calc_tile_size(&tile_info) != ANV_SPARSE_BLOCK_SIZE)
1120 goto out_everything_is_miptail;
1121
1122 assert(surf->tiling != ISL_TILING_LINEAR);
1123
1124 if (image->vk.array_layers == 1) {
1125 layer1_offset = surf->size_B;
1126 } else {
1127 isl_surf_get_image_offset_B_tile_sa(surf, 0, 1, 0, &layer1_offset,
1128 &x_off, &y_off);
1129 if (x_off || y_off)
1130 goto out_everything_is_miptail;
1131 }
1132 assert(layer1_offset % ANV_SPARSE_BLOCK_SIZE == 0);
1133
1134 /* We could try to do better here, but there's not really any point since
1135 * we should be supporting the appropriate tiling formats everywhere.
1136 */
1137 if (!isl_tiling_supports_standard_block_shapes(surf->tiling))
1138 goto out_everything_is_miptail;
1139
1140 int miptail_first_level = surf->miptail_start_level;
1141 if (miptail_first_level >= image->vk.mip_levels)
1142 goto out_no_miptail;
1143
1144 uint64_t miptail_offset = 0;
1145 isl_surf_get_image_offset_B_tile_sa(surf, miptail_first_level, 0, 0,
1146 &miptail_offset,
1147 &x_off, &y_off);
1148 assert(x_off == 0 && y_off == 0);
1149 assert(miptail_offset % ANV_SPARSE_BLOCK_SIZE == 0);
1150
1151 *imageMipTailFirstLod = miptail_first_level;
1152 *imageMipTailSize = ANV_SPARSE_BLOCK_SIZE;
1153 *imageMipTailOffset = binding_plane_offset + miptail_offset;
1154 *imageMipTailStride = layer1_offset;
1155 goto out_debug;
1156
1157 out_no_miptail:
1158 *imageMipTailFirstLod = image->vk.mip_levels;
1159 *imageMipTailSize = 0;
1160 *imageMipTailOffset = 0;
1161 *imageMipTailStride = 0;
1162 goto out_debug;
1163
1164 out_everything_is_miptail:
1165 *imageMipTailFirstLod = 0;
1166 *imageMipTailSize = surf->size_B;
1167 *imageMipTailOffset = binding_plane_offset;
1168 *imageMipTailStride = 0;
1169
1170 out_debug:
1171 sparse_debug("miptail first_lod:%d size:%"PRIu64" offset:%"PRIu64" "
1172 "stride:%"PRIu64"\n",
1173 *imageMipTailFirstLod, *imageMipTailSize,
1174 *imageMipTailOffset, *imageMipTailStride);
1175 }
1176
1177 static struct anv_vm_bind
1178 vk_bind_to_anv_vm_bind(struct anv_sparse_binding_data *sparse,
1179 const struct VkSparseMemoryBind *vk_bind)
1180 {
1181 struct anv_vm_bind anv_bind = {
1182 .bo = NULL,
1183 .address = sparse->address + vk_bind->resourceOffset,
1184 .bo_offset = 0,
1185 .size = vk_bind->size,
1186 .op = ANV_VM_BIND,
1187 };
1188
1189 assert(vk_bind->size);
1190 assert(vk_bind->resourceOffset + vk_bind->size <= sparse->size);
1191
1192 if (vk_bind->memory != VK_NULL_HANDLE) {
1193 anv_bind.bo = anv_device_memory_from_handle(vk_bind->memory)->bo;
1194       anv_bind.bo_offset = vk_bind->memoryOffset;
1195 assert(vk_bind->memoryOffset + vk_bind->size <= anv_bind.bo->size);
1196 }
1197
1198 return anv_bind;
1199 }
1200
1201 static VkResult
1202 anv_sparse_bind_resource_memory(struct anv_device *device,
1203 struct anv_sparse_binding_data *sparse,
1204 uint64_t resource_size,
1205 const VkSparseMemoryBind *vk_bind,
1206 struct anv_sparse_submission *submit)
1207 {
1208 struct anv_vm_bind bind = vk_bind_to_anv_vm_bind(sparse, vk_bind);
1209 uint64_t rem = vk_bind->size % ANV_SPARSE_BLOCK_SIZE;
1210
1211 if (rem != 0) {
1212 if (vk_bind->resourceOffset + vk_bind->size == resource_size)
1213 bind.size += ANV_SPARSE_BLOCK_SIZE - rem;
1214 else
1215 return vk_error(device, VK_ERROR_VALIDATION_FAILED_EXT);
1216 }
1217
1218 return anv_sparse_submission_add(device, submit, &bind);
1219 }
1220
1221 VkResult
1222 anv_sparse_bind_buffer(struct anv_device *device,
1223 struct anv_buffer *buffer,
1224 const VkSparseMemoryBind *vk_bind,
1225 struct anv_sparse_submission *submit)
1226 {
1227 return anv_sparse_bind_resource_memory(device, &buffer->sparse_data,
1228 buffer->vk.size,
1229 vk_bind, submit);
1230 }
1231
1232 VkResult
1233 anv_sparse_bind_image_opaque(struct anv_device *device,
1234 struct anv_image *image,
1235 const VkSparseMemoryBind *vk_bind,
1236 struct anv_sparse_submission *submit)
1237 {
1238 struct anv_image_binding *b =
1239 &image->bindings[ANV_IMAGE_MEMORY_BINDING_MAIN];
1240 assert(!image->disjoint);
1241
1242 if (INTEL_DEBUG(DEBUG_SPARSE)) {
1243 sparse_debug("%s:\n", __func__);
1244 dump_anv_image(image);
1245 u_foreach_bit(b, image->vk.aspects) {
1246 VkImageAspectFlagBits aspect = 1 << b;
1247 const uint32_t plane = anv_image_aspect_to_plane(image, aspect);
1248 struct isl_surf *surf = &image->planes[plane].primary_surface.isl;
1249 sparse_debug("aspect 0x%x (plane %d):\n", aspect, plane);
1250 dump_isl_surf(surf);
1251 }
1252 sparse_debug("\n");
1253 }
1254
1255 return anv_sparse_bind_resource_memory(device, &b->sparse_data,
1256 b->memory_range.size,
1257 vk_bind, submit);
1258 }
1259
1260 VkResult
1261 anv_sparse_bind_image_memory(struct anv_queue *queue,
1262 struct anv_image *image,
1263 const VkSparseImageMemoryBind *bind,
1264 struct anv_sparse_submission *submit)
1265 {
1266 struct anv_device *device = queue->device;
1267 VkImageAspectFlags aspect = bind->subresource.aspectMask;
1268 uint32_t mip_level = bind->subresource.mipLevel;
1269 uint32_t array_layer = bind->subresource.arrayLayer;
1270
1271 assert(!(bind->flags & VK_SPARSE_MEMORY_BIND_METADATA_BIT));
1272
1273 struct anv_image_binding *img_binding = image->disjoint ?
1274 anv_image_aspect_to_binding(image, aspect) :
1275 &image->bindings[ANV_IMAGE_MEMORY_BINDING_MAIN];
1276 struct anv_sparse_binding_data *sparse_data = &img_binding->sparse_data;
1277
1278 const uint32_t plane = anv_image_aspect_to_plane(image, aspect);
1279 struct isl_surf *surf = &image->planes[plane].primary_surface.isl;
1280 uint64_t binding_plane_offset =
1281 image->planes[plane].primary_surface.memory_range.offset;
1282 const struct isl_format_layout *layout =
1283 isl_format_get_layout(surf->format);
1284 struct isl_tile_info tile_info;
1285 isl_surf_get_tile_info(surf, &tile_info);
1286
1287 if (INTEL_DEBUG(DEBUG_SPARSE)) {
1288 sparse_debug("%s:\n", __func__);
1289 sparse_debug("mip_level:%d array_layer:%d\n", mip_level, array_layer);
1290 sparse_debug("aspect:0x%x plane:%d\n", aspect, plane);
1291 sparse_debug("binding offset: [%d, %d, %d] extent: [%d, %d, %d]\n",
1292 bind->offset.x, bind->offset.y, bind->offset.z,
1293 bind->extent.width, bind->extent.height,
1294 bind->extent.depth);
1295 dump_anv_image(image);
1296 dump_isl_surf(surf);
1297 sparse_debug("\n");
1298 }
1299
1300 VkExtent3D block_shape_px =
1301 anv_sparse_calc_block_shape(device->physical, surf, &tile_info);
1302 VkExtent3D block_shape_el = vk_extent3d_px_to_el(block_shape_px, layout);
1303
1304 /* Both bind->offset and bind->extent are in pixel units. */
1305 VkOffset3D bind_offset_el = vk_offset3d_px_to_el(bind->offset, layout);
1306
1307    /* The spec says we only really need to align when, for a given coordinate,
1308     * offset + extent equals the corresponding dimension of the image
1309     * subresource, but any other non-aligned usage is invalid, so just
1310     * align everything.
1311 */
1312 VkExtent3D bind_extent_px = {
1313 .width = ALIGN_NPOT(bind->extent.width, block_shape_px.width),
1314 .height = ALIGN_NPOT(bind->extent.height, block_shape_px.height),
1315 .depth = ALIGN_NPOT(bind->extent.depth, block_shape_px.depth),
1316 };
1317 VkExtent3D bind_extent_el = vk_extent3d_px_to_el(bind_extent_px, layout);
1318
1319 /* Nothing that has a tile_size different than ANV_SPARSE_BLOCK_SIZE should
1320 * be reaching here, as these cases should be treated as "everything is
1321 * part of the miptail" (see anv_sparse_calc_miptail_properties()).
1322 */
1323 assert(isl_calc_tile_size(&tile_info) == ANV_SPARSE_BLOCK_SIZE);
1324
1325 /* How many blocks are necessary to form a whole line on this image? */
1326 const uint32_t blocks_per_line = surf->row_pitch_B / (layout->bpb / 8) /
1327 block_shape_el.width;
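   /* Illustrative numbers: with row_pitch_B = 2048 and bpb = 32 (4B per
    * element) a row holds 512 elements, so with a 128-element-wide block
    * blocks_per_line is 4.
    */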
1328    /* The loop below will try to bind a whole line of blocks at a time, as
1329     * they're guaranteed to be contiguous, so we calculate how many blocks
1330     * that is and how big each block is in order to figure out the bind size
1331     * of a whole line.
1332 */
1333 uint64_t line_bind_size_in_blocks = bind_extent_el.width /
1334 block_shape_el.width;
1335 uint64_t line_bind_size = line_bind_size_in_blocks * ANV_SPARSE_BLOCK_SIZE;
1336 assert(line_bind_size_in_blocks != 0);
1337 assert(line_bind_size != 0);
1338
1339 uint64_t memory_offset = bind->memoryOffset;
1340 for (uint32_t z = bind_offset_el.z;
1341 z < bind_offset_el.z + bind_extent_el.depth;
1342 z += block_shape_el.depth) {
1343 uint64_t subresource_offset_B;
1344 uint32_t subresource_x_offset, subresource_y_offset;
1345 isl_surf_get_image_offset_B_tile_sa(surf, mip_level, array_layer, z,
1346 &subresource_offset_B,
1347 &subresource_x_offset,
1348 &subresource_y_offset);
1349 assert(subresource_x_offset == 0 && subresource_y_offset == 0);
1350 assert(subresource_offset_B % ANV_SPARSE_BLOCK_SIZE == 0);
1351
1352 for (uint32_t y = bind_offset_el.y;
1353 y < bind_offset_el.y + bind_extent_el.height;
1354           y += block_shape_el.height) {
1355 uint32_t line_block_offset = y / block_shape_el.height *
1356 blocks_per_line;
1357 uint64_t line_start_B = subresource_offset_B +
1358 line_block_offset * ANV_SPARSE_BLOCK_SIZE;
1359 uint64_t bind_offset_B = line_start_B +
1360 (bind_offset_el.x / block_shape_el.width) *
1361 ANV_SPARSE_BLOCK_SIZE;
1362
1363 VkSparseMemoryBind opaque_bind = {
1364 .resourceOffset = binding_plane_offset + bind_offset_B,
1365 .size = line_bind_size,
1366 .memory = bind->memory,
1367 .memoryOffset = memory_offset,
1368 .flags = bind->flags,
1369 };
1370
1371 memory_offset += line_bind_size;
1372
1373 assert(line_start_B % ANV_SPARSE_BLOCK_SIZE == 0);
1374 assert(opaque_bind.resourceOffset % ANV_SPARSE_BLOCK_SIZE == 0);
1375 assert(opaque_bind.size % ANV_SPARSE_BLOCK_SIZE == 0);
1376
1377 struct anv_vm_bind anv_bind = vk_bind_to_anv_vm_bind(sparse_data,
1378 &opaque_bind);
1379 VkResult result = anv_sparse_submission_add(device, submit,
1380 &anv_bind);
1381 if (result != VK_SUCCESS)
1382 return result;
1383 }
1384 }
1385
1386 return VK_SUCCESS;
1387 }
1388
1389 VkResult
1390 anv_sparse_image_check_support(struct anv_physical_device *pdevice,
1391 VkImageCreateFlags flags,
1392 VkImageTiling tiling,
1393 VkSampleCountFlagBits samples,
1394 VkImageType type,
1395 VkFormat vk_format)
1396 {
1397 assert(flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT);
1398
1399 /* The spec says:
1400 * "A sparse image created using VK_IMAGE_CREATE_SPARSE_BINDING_BIT (but
1401 * not VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT) supports all formats that
1402 * non-sparse usage supports, and supports both VK_IMAGE_TILING_OPTIMAL
1403 * and VK_IMAGE_TILING_LINEAR tiling."
1404 */
1405 if (!(flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT))
1406 return VK_SUCCESS;
1407
1408 if (type == VK_IMAGE_TYPE_1D)
1409 return VK_ERROR_FORMAT_NOT_SUPPORTED;
1410
1411 /* From here on, these are the rules:
1412 * "A sparse image created using VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT
1413 * supports all non-compressed color formats with power-of-two element
1414 * size that non-sparse usage supports. Additional formats may also be
1415 * supported and can be queried via
1416 * vkGetPhysicalDeviceSparseImageFormatProperties.
1417 * VK_IMAGE_TILING_LINEAR tiling is not supported."
1418 */
1419
1420 /* We choose not to support sparse residency on emulated compressed
1421 * formats due to the additional image plane. It would make the
1422 * implementation extremely complicated.
1423 */
1424 if (anv_is_format_emulated(pdevice, vk_format))
1425 return VK_ERROR_FORMAT_NOT_SUPPORTED;
1426
1427 /* While the spec itself says linear is not supported (see above), deqp-vk
1428     * still tries to create linear sparse images, so we have to check for it.
1429 * This is also said in VUID-VkImageCreateInfo-tiling-04121:
1430 * "If tiling is VK_IMAGE_TILING_LINEAR, flags must not contain
1431 * VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT"
1432 */
1433 if (tiling == VK_IMAGE_TILING_LINEAR)
1434 return VK_ERROR_FORMAT_NOT_SUPPORTED;
1435
1436 if ((samples & VK_SAMPLE_COUNT_2_BIT &&
1437 !pdevice->vk.supported_features.sparseResidency2Samples) ||
1438 (samples & VK_SAMPLE_COUNT_4_BIT &&
1439 !pdevice->vk.supported_features.sparseResidency4Samples) ||
1440 (samples & VK_SAMPLE_COUNT_8_BIT &&
1441 !pdevice->vk.supported_features.sparseResidency8Samples) ||
1442 (samples & VK_SAMPLE_COUNT_16_BIT &&
1443 !pdevice->vk.supported_features.sparseResidency16Samples) ||
1444 samples & VK_SAMPLE_COUNT_32_BIT ||
1445 samples & VK_SAMPLE_COUNT_64_BIT)
1446 return VK_ERROR_FEATURE_NOT_PRESENT;
1447
1448 /* While the Vulkan spec allows us to support depth/stencil sparse images
1449 * everywhere, sometimes we're not able to have them with the tiling
1450 * formats that give us the standard block shapes. Having standard block
1451 * shapes is higher priority than supporting depth/stencil sparse images.
1452 *
1453 * Please see ISL's filter_tiling() functions for accurate explanations on
1454 * why depth/stencil images are not always supported with the tiling
1455 * formats we want. But in short: depth/stencil support in our HW is
1456 * limited to 2D and we can't build a 2D view of a 3D image with these
1457 * tiling formats due to the address swizzling being different.
1458 */
1459 VkImageAspectFlags aspects = vk_format_aspects(vk_format);
1460 if (aspects & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
1461 /* For multi-sampled images, the image layouts for color and
1462 * depth/stencil are different, and only the color layout is compatible
1463 * with the standard block shapes.
1464 */
1465 if (samples != VK_SAMPLE_COUNT_1_BIT)
1466 return VK_ERROR_FORMAT_NOT_SUPPORTED;
1467
1468 /* For 125+, isl_gfx125_filter_tiling() claims 3D is not supported.
1469 * For the previous platforms, isl_gfx6_filter_tiling() says only 2D is
1470 * supported.
1471 */
1472 if (pdevice->info.verx10 >= 125) {
1473 if (type == VK_IMAGE_TYPE_3D)
1474 return VK_ERROR_FORMAT_NOT_SUPPORTED;
1475 } else {
1476 if (type != VK_IMAGE_TYPE_2D)
1477 return VK_ERROR_FORMAT_NOT_SUPPORTED;
1478 }
1479 }
1480
1481 const struct anv_format *anv_format = anv_get_format(vk_format);
1482 if (!anv_format)
1483 return VK_ERROR_FORMAT_NOT_SUPPORTED;
1484
1485 for (int p = 0; p < anv_format->n_planes; p++) {
1486 enum isl_format isl_format = anv_format->planes[p].isl_format;
1487
1488 if (isl_format == ISL_FORMAT_UNSUPPORTED)
1489 return VK_ERROR_FORMAT_NOT_SUPPORTED;
1490
1491 const struct isl_format_layout *isl_layout =
1492 isl_format_get_layout(isl_format);
1493
1494 /* As quoted above, we only need to support the power-of-two formats.
1495 * The problem with the non-power-of-two formats is that we need an
1496 * integer number of pixels to fit into a sparse block, so we'd need the
1497 * sparse block sizes to be, for example, 192k for 24bpp.
1498 *
1499 * TODO: add support for these formats.
1500 */
1501 if (isl_layout->bpb != 8 && isl_layout->bpb != 16 &&
1502 isl_layout->bpb != 32 && isl_layout->bpb != 64 &&
1503 isl_layout->bpb != 128)
1504 return VK_ERROR_FORMAT_NOT_SUPPORTED;
1505
1506 /* ISL_TILING_64_XE2_BIT's block shapes are not always Vulkan's standard
1507 * block shapes, so exclude what's non-standard.
1508 */
1509 if (pdevice->info.ver == 20) {
1510 switch (samples) {
1511 case VK_SAMPLE_COUNT_2_BIT:
1512 if (isl_layout->bpb == 128)
1513 return VK_ERROR_FORMAT_NOT_SUPPORTED;
1514 break;
1515 case VK_SAMPLE_COUNT_8_BIT:
1516 if (isl_layout->bpb == 8 || isl_layout->bpb == 32)
1517 return VK_ERROR_FORMAT_NOT_SUPPORTED;
1518 break;
1519 case VK_SAMPLE_COUNT_16_BIT:
1520 if (isl_layout->bpb == 64)
1521 return VK_ERROR_FORMAT_NOT_SUPPORTED;
1522 break;
1523 default:
1524 break;
1525 }
1526 }
1527 }
1528
1529 /* These YUV formats are considered by Vulkan to be compressed 2x1 blocks.
1530 * We don't need to support them since they're compressed. On Gfx12 we
1531 * can't even have Tile64 for them. Once we do support these formats we'll
1532 * have to report the correct block shapes because dEQP cares about them,
1533 * and we'll have to adjust for the fact that ISL treats these as 16bpp 1x1
1534 * blocks instead of 32bpp 2x1 compressed blocks (as block shapes are
1535 * reported in units of compressed blocks).
1536 */
1537 if (vk_format == VK_FORMAT_G8B8G8R8_422_UNORM ||
1538 vk_format == VK_FORMAT_B8G8R8G8_422_UNORM)
1539 return VK_ERROR_FORMAT_NOT_SUPPORTED;
1540
1541 return VK_SUCCESS;
1542 }
1543