1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include <xf86drm.h>
31
32 #include "anv_private.h"
33 #include "anv_measure.h"
34
35 #include "common/intel_debug_identifier.h"
36
37 #include "genxml/gen8_pack.h"
38 #include "genxml/genX_bits.h"
39 #include "perf/intel_perf.h"
40
41 #include "util/u_debug.h"
42 #include "util/perf/u_trace.h"
43
44 /** \file anv_batch_chain.c
45 *
46 * This file contains functions related to anv_cmd_buffer as a data
47 * structure. This involves everything required to create and destroy
48 * the actual batch buffers as well as link them together and handle
49 * relocations and surface state. It specifically does *not* contain any
50 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
51 */
52
53 /*-----------------------------------------------------------------------*
54 * Functions related to anv_reloc_list
55 *-----------------------------------------------------------------------*/
56
57 VkResult
58 anv_reloc_list_init(struct anv_reloc_list *list,
59 const VkAllocationCallbacks *alloc)
60 {
61 memset(list, 0, sizeof(*list));
62 return VK_SUCCESS;
63 }
64
65 static VkResult
66 anv_reloc_list_init_clone(struct anv_reloc_list *list,
67 const VkAllocationCallbacks *alloc,
68 const struct anv_reloc_list *other_list)
69 {
70 list->num_relocs = other_list->num_relocs;
71 list->array_length = other_list->array_length;
72
73 if (list->num_relocs > 0) {
74 list->relocs =
75 vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
76 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
77 if (list->relocs == NULL)
78 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
79
80 list->reloc_bos =
81 vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
82 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
83 if (list->reloc_bos == NULL) {
84 vk_free(alloc, list->relocs);
85 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
86 }
87
88 memcpy(list->relocs, other_list->relocs,
89 list->array_length * sizeof(*list->relocs));
90 memcpy(list->reloc_bos, other_list->reloc_bos,
91 list->array_length * sizeof(*list->reloc_bos));
92 } else {
93 list->relocs = NULL;
94 list->reloc_bos = NULL;
95 }
96
97 list->dep_words = other_list->dep_words;
98
99 if (list->dep_words > 0) {
100 list->deps =
101 vk_alloc(alloc, list->dep_words * sizeof(BITSET_WORD), 8,
102 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
103 memcpy(list->deps, other_list->deps,
104 list->dep_words * sizeof(BITSET_WORD));
105 } else {
106 list->deps = NULL;
107 }
108
109 return VK_SUCCESS;
110 }
111
112 void
113 anv_reloc_list_finish(struct anv_reloc_list *list,
114 const VkAllocationCallbacks *alloc)
115 {
116 vk_free(alloc, list->relocs);
117 vk_free(alloc, list->reloc_bos);
118 vk_free(alloc, list->deps);
119 }
120
121 static VkResult
122 anv_reloc_list_grow(struct anv_reloc_list *list,
123 const VkAllocationCallbacks *alloc,
124 size_t num_additional_relocs)
125 {
126 if (list->num_relocs + num_additional_relocs <= list->array_length)
127 return VK_SUCCESS;
128
129 size_t new_length = MAX2(16, list->array_length * 2);
130 while (new_length < list->num_relocs + num_additional_relocs)
131 new_length *= 2;
132
133 struct drm_i915_gem_relocation_entry *new_relocs =
134 vk_realloc(alloc, list->relocs,
135 new_length * sizeof(*list->relocs), 8,
136 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
137 if (new_relocs == NULL)
138 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
139 list->relocs = new_relocs;
140
141 struct anv_bo **new_reloc_bos =
142 vk_realloc(alloc, list->reloc_bos,
143 new_length * sizeof(*list->reloc_bos), 8,
144 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
145 if (new_reloc_bos == NULL)
146 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
147 list->reloc_bos = new_reloc_bos;
148
149 list->array_length = new_length;
150
151 return VK_SUCCESS;
152 }
153
154 static VkResult
155 anv_reloc_list_grow_deps(struct anv_reloc_list *list,
156 const VkAllocationCallbacks *alloc,
157 uint32_t min_num_words)
158 {
159 if (min_num_words <= list->dep_words)
160 return VK_SUCCESS;
161
162 uint32_t new_length = MAX2(32, list->dep_words * 2);
163 while (new_length < min_num_words)
164 new_length *= 2;
165
166 BITSET_WORD *new_deps =
167 vk_realloc(alloc, list->deps, new_length * sizeof(BITSET_WORD), 8,
168 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
169 if (new_deps == NULL)
170 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
171 list->deps = new_deps;
172
173 /* Zero out the new data */
174 memset(list->deps + list->dep_words, 0,
175 (new_length - list->dep_words) * sizeof(BITSET_WORD));
176 list->dep_words = new_length;
177
178 return VK_SUCCESS;
179 }
180
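/* Force a single read of a field that may be updated concurrently (e.g.
 * anv_bo::offset, which another thread can update when it submits the same
 * BO). Without this the compiler would be free to re-load the value and we
 * could end up observing two different offsets below.
 */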
181 #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
182
183 VkResult
184 anv_reloc_list_add_bo(struct anv_reloc_list *list,
185 const VkAllocationCallbacks *alloc,
186 struct anv_bo *target_bo)
187 {
188 assert(!target_bo->is_wrapper);
189 assert(anv_bo_is_pinned(target_bo));
190
191 uint32_t idx = target_bo->gem_handle;
192 VkResult result = anv_reloc_list_grow_deps(list, alloc,
193 (idx / BITSET_WORDBITS) + 1);
194 if (unlikely(result != VK_SUCCESS))
195 return result;
196
197 BITSET_SET(list->deps, idx);
198
199 return VK_SUCCESS;
200 }
201
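/* Record that the batch contains a pointer to target_bo at the given byte
 * offset.
 *
 * For pinned (softpin) BOs we only need to track the dependency so the
 * kernel keeps the BO resident; otherwise we append a real i915 relocation
 * entry using the BO's current offset as the presumed address. If
 * address_u64_out is non-NULL, it is filled with the address that should be
 * written into the batch right now.
 */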
202 VkResult
203 anv_reloc_list_add(struct anv_reloc_list *list,
204 const VkAllocationCallbacks *alloc,
205 uint32_t offset, struct anv_bo *target_bo, uint32_t delta,
206 uint64_t *address_u64_out)
207 {
208 struct drm_i915_gem_relocation_entry *entry;
209 int index;
210
211 struct anv_bo *unwrapped_target_bo = anv_bo_unwrap(target_bo);
212 uint64_t target_bo_offset = READ_ONCE(unwrapped_target_bo->offset);
213 if (address_u64_out)
214 *address_u64_out = target_bo_offset + delta;
215
216 assert(unwrapped_target_bo->gem_handle > 0);
217 assert(unwrapped_target_bo->refcount > 0);
218
219 if (anv_bo_is_pinned(unwrapped_target_bo))
220 return anv_reloc_list_add_bo(list, alloc, unwrapped_target_bo);
221
222 VkResult result = anv_reloc_list_grow(list, alloc, 1);
223 if (result != VK_SUCCESS)
224 return result;
225
226 /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
227 index = list->num_relocs++;
228 list->reloc_bos[index] = target_bo;
229 entry = &list->relocs[index];
230 entry->target_handle = -1; /* See also anv_cmd_buffer_process_relocs() */
231 entry->delta = delta;
232 entry->offset = offset;
233 entry->presumed_offset = target_bo_offset;
234 entry->read_domains = 0;
235 entry->write_domain = 0;
236 VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
237
238 return VK_SUCCESS;
239 }
240
241 static void
242 anv_reloc_list_clear(struct anv_reloc_list *list)
243 {
244 list->num_relocs = 0;
245 if (list->dep_words > 0)
246 memset(list->deps, 0, list->dep_words * sizeof(BITSET_WORD));
247 }
248
249 static VkResult
250 anv_reloc_list_append(struct anv_reloc_list *list,
251 const VkAllocationCallbacks *alloc,
252 struct anv_reloc_list *other, uint32_t offset)
253 {
254 VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
255 if (result != VK_SUCCESS)
256 return result;
257
258 if (other->num_relocs > 0) {
259 memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
260 other->num_relocs * sizeof(other->relocs[0]));
261 memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
262 other->num_relocs * sizeof(other->reloc_bos[0]));
263
264 for (uint32_t i = 0; i < other->num_relocs; i++)
265 list->relocs[i + list->num_relocs].offset += offset;
266
267 list->num_relocs += other->num_relocs;
268 }
269
270 anv_reloc_list_grow_deps(list, alloc, other->dep_words);
271 for (uint32_t w = 0; w < other->dep_words; w++)
272 list->deps[w] |= other->deps[w];
273
274 return VK_SUCCESS;
275 }
276
277 /*-----------------------------------------------------------------------*
278 * Functions related to anv_batch
279 *-----------------------------------------------------------------------*/
280
281 void *
282 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
283 {
284 if (batch->next + num_dwords * 4 > batch->end) {
285 VkResult result = batch->extend_cb(batch, batch->user_data);
286 if (result != VK_SUCCESS) {
287 anv_batch_set_error(batch, result);
288 return NULL;
289 }
290 }
291
292 void *p = batch->next;
293
294 batch->next += num_dwords * 4;
295 assert(batch->next <= batch->end);
296
297 return p;
298 }
299
300 struct anv_address
301 anv_batch_address(struct anv_batch *batch, void *batch_location)
302 {
303 assert(batch->start <= batch_location);
304
305 /* Allow a jump at the current location of the batch. */
306 assert(batch->next >= batch_location);
307
308 return anv_address_add(batch->start_addr, batch_location - batch->start);
309 }
310
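/* Append the contents of `other` onto `batch`, growing `batch` if required,
 * and merge other's relocation list, offsetting each copied relocation by
 * the position at which the copy landed.
 */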
311 void
312 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
313 {
314 uint32_t size, offset;
315
316 size = other->next - other->start;
317 assert(size % 4 == 0);
318
319 if (batch->next + size > batch->end) {
320 VkResult result = batch->extend_cb(batch, batch->user_data);
321 if (result != VK_SUCCESS) {
322 anv_batch_set_error(batch, result);
323 return;
324 }
325 }
326
327 assert(batch->next + size <= batch->end);
328
329 VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
330 memcpy(batch->next, other->start, size);
331
332 offset = batch->next - batch->start;
333 VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
334 other->relocs, offset);
335 if (result != VK_SUCCESS) {
336 anv_batch_set_error(batch, result);
337 return;
338 }
339
340 batch->next += size;
341 }
342
343 /*-----------------------------------------------------------------------*
344 * Functions related to anv_batch_bo
345 *-----------------------------------------------------------------------*/
346
347 static VkResult
348 anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
349 uint32_t size,
350 struct anv_batch_bo **bbo_out)
351 {
352 VkResult result;
353
354 struct anv_batch_bo *bbo = vk_zalloc(&cmd_buffer->vk.pool->alloc, sizeof(*bbo),
355 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
356 if (bbo == NULL)
357 return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
358
359 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
360 size, &bbo->bo);
361 if (result != VK_SUCCESS)
362 goto fail_alloc;
363
364 result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->vk.pool->alloc);
365 if (result != VK_SUCCESS)
366 goto fail_bo_alloc;
367
368 *bbo_out = bbo;
369
370 return VK_SUCCESS;
371
372 fail_bo_alloc:
373 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
374 fail_alloc:
375 vk_free(&cmd_buffer->vk.pool->alloc, bbo);
376
377 return result;
378 }
379
380 static VkResult
381 anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
382 const struct anv_batch_bo *other_bbo,
383 struct anv_batch_bo **bbo_out)
384 {
385 VkResult result;
386
387 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->vk.pool->alloc, sizeof(*bbo),
388 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
389 if (bbo == NULL)
390 return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
391
392 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
393 other_bbo->bo->size, &bbo->bo);
394 if (result != VK_SUCCESS)
395 goto fail_alloc;
396
397 result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->vk.pool->alloc,
398 &other_bbo->relocs);
399 if (result != VK_SUCCESS)
400 goto fail_bo_alloc;
401
402 bbo->length = other_bbo->length;
403 memcpy(bbo->bo->map, other_bbo->bo->map, other_bbo->length);
404 *bbo_out = bbo;
405
406 return VK_SUCCESS;
407
408 fail_bo_alloc:
409 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
410 fail_alloc:
411 vk_free(&cmd_buffer->vk.pool->alloc, bbo);
412
413 return result;
414 }
415
416 static void
417 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
418 size_t batch_padding)
419 {
420 anv_batch_set_storage(batch, (struct anv_address) { .bo = bbo->bo, },
421 bbo->bo->map, bbo->bo->size - batch_padding);
422 batch->relocs = &bbo->relocs;
423 anv_reloc_list_clear(&bbo->relocs);
424 }
425
426 static void
427 anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
428 size_t batch_padding)
429 {
430 batch->start_addr = (struct anv_address) { .bo = bbo->bo, };
431 batch->start = bbo->bo->map;
432 batch->next = bbo->bo->map + bbo->length;
433 batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
434 batch->relocs = &bbo->relocs;
435 }
436
437 static void
438 anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
439 {
440 assert(batch->start == bbo->bo->map);
441 bbo->length = batch->next - batch->start;
442 VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
443 }
444
445 static VkResult
446 anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
447 struct anv_batch *batch, size_t additional,
448 size_t batch_padding)
449 {
450 assert(batch->start == bbo->bo->map);
451 bbo->length = batch->next - batch->start;
452
453 size_t new_size = bbo->bo->size;
454 while (new_size <= bbo->length + additional + batch_padding)
455 new_size *= 2;
456
457 if (new_size == bbo->bo->size)
458 return VK_SUCCESS;
459
460 struct anv_bo *new_bo;
461 VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
462 new_size, &new_bo);
463 if (result != VK_SUCCESS)
464 return result;
465
466 memcpy(new_bo->map, bbo->bo->map, bbo->length);
467
468 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
469
470 bbo->bo = new_bo;
471 anv_batch_bo_continue(bbo, batch, batch_padding);
472
473 return VK_SUCCESS;
474 }
475
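/* Make the MI_BATCH_BUFFER_START at the end of prev_bbo jump to
 * next_bbo_offset bytes into next_bbo. With relocations we patch the
 * existing relocation entry (and poison presumed_offset so the kernel
 * re-applies it); with softpin we simply rewrite the address in place.
 */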
476 static void
477 anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
478 struct anv_batch_bo *prev_bbo,
479 struct anv_batch_bo *next_bbo,
480 uint32_t next_bbo_offset)
481 {
482 const uint32_t bb_start_offset =
483 prev_bbo->length - GFX8_MI_BATCH_BUFFER_START_length * 4;
484 ASSERTED const uint32_t *bb_start = prev_bbo->bo->map + bb_start_offset;
485
486 /* Make sure we're looking at a MI_BATCH_BUFFER_START */
487 assert(((*bb_start >> 29) & 0x07) == 0);
488 assert(((*bb_start >> 23) & 0x3f) == 49);
489
490 if (anv_use_relocations(cmd_buffer->device->physical)) {
491 uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
492 assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);
493
494 prev_bbo->relocs.reloc_bos[reloc_idx] = next_bbo->bo;
495 prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;
496
497 /* Use a bogus presumed offset to force a relocation */
498 prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
499 } else {
500 assert(anv_bo_is_pinned(prev_bbo->bo));
501 assert(anv_bo_is_pinned(next_bbo->bo));
502
503 write_reloc(cmd_buffer->device,
504 prev_bbo->bo->map + bb_start_offset + 4,
505 next_bbo->bo->offset + next_bbo_offset, true);
506 }
507 }
508
509 static void
510 anv_batch_bo_destroy(struct anv_batch_bo *bbo,
511 struct anv_cmd_buffer *cmd_buffer)
512 {
513 anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->vk.pool->alloc);
514 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
515 vk_free(&cmd_buffer->vk.pool->alloc, bbo);
516 }
517
518 static VkResult
519 anv_batch_bo_list_clone(const struct list_head *list,
520 struct anv_cmd_buffer *cmd_buffer,
521 struct list_head *new_list)
522 {
523 VkResult result = VK_SUCCESS;
524
525 list_inithead(new_list);
526
527 struct anv_batch_bo *prev_bbo = NULL;
528 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
529 struct anv_batch_bo *new_bbo = NULL;
530 result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
531 if (result != VK_SUCCESS)
532 break;
533 list_addtail(&new_bbo->link, new_list);
534
535 if (prev_bbo)
536 anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);
537
538 prev_bbo = new_bbo;
539 }
540
541 if (result != VK_SUCCESS) {
542 list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link) {
543 list_del(&bbo->link);
544 anv_batch_bo_destroy(bbo, cmd_buffer);
545 }
546 }
547
548 return result;
549 }
550
551 /*-----------------------------------------------------------------------*
552 * Functions related to anv_cmd_buffer
553 *-----------------------------------------------------------------------*/
554
555 static struct anv_batch_bo *
556 anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
557 {
558 return list_entry(cmd_buffer->batch_bos.prev, struct anv_batch_bo, link);
559 }
560
561 struct anv_address
562 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
563 {
564 struct anv_state_pool *pool = anv_binding_table_pool(cmd_buffer->device);
565 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
566 return (struct anv_address) {
567 .bo = pool->block_pool.bo,
568 .offset = bt_block->offset - pool->start_offset,
569 };
570 }
571
572 static void
573 emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
574 struct anv_bo *bo, uint32_t offset)
575 {
576 /* In gfx8+ the address field grew to two dwords to accommodate 48 bit
577 * offsets. The high 16 bits are in the last dword, so we can use the gfx8
578 * version in either case, as long as we set the instruction length in the
579 * header accordingly. This means that we always emit three dwords here
580 * and all the padding and adjustment we do in this file works for all
581 * gens.
582 */
583
584 #define GFX7_MI_BATCH_BUFFER_START_length 2
585 #define GFX7_MI_BATCH_BUFFER_START_length_bias 2
586
587 const uint32_t gfx7_length =
588 GFX7_MI_BATCH_BUFFER_START_length - GFX7_MI_BATCH_BUFFER_START_length_bias;
589 const uint32_t gfx8_length =
590 GFX8_MI_BATCH_BUFFER_START_length - GFX8_MI_BATCH_BUFFER_START_length_bias;
591
592 anv_batch_emit(&cmd_buffer->batch, GFX8_MI_BATCH_BUFFER_START, bbs) {
593 bbs.DWordLength = cmd_buffer->device->info->ver < 8 ?
594 gfx7_length : gfx8_length;
595 bbs.SecondLevelBatchBuffer = Firstlevelbatch;
596 bbs.AddressSpaceIndicator = ASI_PPGTT;
597 bbs.BatchBufferStartAddress = (struct anv_address) { bo, offset };
598 }
599 }
600
601 static void
602 cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
603 struct anv_batch_bo *bbo)
604 {
605 struct anv_batch *batch = &cmd_buffer->batch;
606 struct anv_batch_bo *current_bbo =
607 anv_cmd_buffer_current_batch_bo(cmd_buffer);
608
609 /* We set the end of the batch a little short so we would be sure we
610 * have room for the chaining command. Since we're about to emit the
611 * chaining command, let's set it back where it should go.
612 */
613 batch->end += GFX8_MI_BATCH_BUFFER_START_length * 4;
614 assert(batch->end == current_bbo->bo->map + current_bbo->bo->size);
615
616 emit_batch_buffer_start(cmd_buffer, bbo->bo, 0);
617
618 anv_batch_bo_finish(current_bbo, batch);
619 }
620
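/* Called at submit time: overwrite the instruction slot saved in
 * cmd_buffer_from->batch_end with an MI_BATCH_BUFFER_START that jumps into
 * cmd_buffer_to's first batch BO, so consecutive command buffers can be
 * chained together into a single execbuf.
 */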
621 static void
622 anv_cmd_buffer_record_chain_submit(struct anv_cmd_buffer *cmd_buffer_from,
623 struct anv_cmd_buffer *cmd_buffer_to)
624 {
625 assert(!anv_use_relocations(cmd_buffer_from->device->physical));
626
627 uint32_t *bb_start = cmd_buffer_from->batch_end;
628
629 struct anv_batch_bo *last_bbo =
630 list_last_entry(&cmd_buffer_from->batch_bos, struct anv_batch_bo, link);
631 struct anv_batch_bo *first_bbo =
632 list_first_entry(&cmd_buffer_to->batch_bos, struct anv_batch_bo, link);
633
634 struct GFX8_MI_BATCH_BUFFER_START gen_bb_start = {
635 __anv_cmd_header(GFX8_MI_BATCH_BUFFER_START),
636 .SecondLevelBatchBuffer = Firstlevelbatch,
637 .AddressSpaceIndicator = ASI_PPGTT,
638 .BatchBufferStartAddress = (struct anv_address) { first_bbo->bo, 0 },
639 };
640 struct anv_batch local_batch = {
641 .start = last_bbo->bo->map,
642 .end = last_bbo->bo->map + last_bbo->bo->size,
643 .relocs = &last_bbo->relocs,
644 .alloc = &cmd_buffer_from->vk.pool->alloc,
645 };
646
647 __anv_cmd_pack(GFX8_MI_BATCH_BUFFER_START)(&local_batch, bb_start, &gen_bb_start);
648
649 last_bbo->chained = true;
650 }
651
652 static void
653 anv_cmd_buffer_record_end_submit(struct anv_cmd_buffer *cmd_buffer)
654 {
655 assert(!anv_use_relocations(cmd_buffer->device->physical));
656
657 struct anv_batch_bo *last_bbo =
658 list_last_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
659 last_bbo->chained = false;
660
661 uint32_t *batch = cmd_buffer->batch_end;
662 anv_pack_struct(batch, GFX8_MI_BATCH_BUFFER_END,
663 __anv_cmd_header(GFX8_MI_BATCH_BUFFER_END));
664 }
665
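/* batch->extend_cb used when the device can chain batches: allocate a fresh
 * batch BO (capped at ANV_MAX_CMD_BUFFER_BATCH_SIZE), emit an
 * MI_BATCH_BUFFER_START in the old BO pointing at it, and make it the
 * current batch. anv_cmd_buffer_grow_batch() below is the fallback for
 * devices that cannot chain; it reallocates the current BO at a larger size
 * instead.
 */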
666 static VkResult
667 anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
668 {
669 struct anv_cmd_buffer *cmd_buffer = _data;
670 struct anv_batch_bo *new_bbo = NULL;
671 /* Cap reallocation to chunk. */
672 uint32_t alloc_size = MIN2(cmd_buffer->total_batch_size,
673 ANV_MAX_CMD_BUFFER_BATCH_SIZE);
674
675 VkResult result = anv_batch_bo_create(cmd_buffer, alloc_size, &new_bbo);
676 if (result != VK_SUCCESS)
677 return result;
678
679 cmd_buffer->total_batch_size += alloc_size;
680
681 struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
682 if (seen_bbo == NULL) {
683 anv_batch_bo_destroy(new_bbo, cmd_buffer);
684 return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
685 }
686 *seen_bbo = new_bbo;
687
688 cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
689
690 list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
691
692 anv_batch_bo_start(new_bbo, batch, GFX8_MI_BATCH_BUFFER_START_length * 4);
693
694 return VK_SUCCESS;
695 }
696
697 static VkResult
698 anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
699 {
700 struct anv_cmd_buffer *cmd_buffer = _data;
701 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
702
703 anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
704 GFX8_MI_BATCH_BUFFER_START_length * 4);
705
706 return VK_SUCCESS;
707 }
708
709 /** Allocate a binding table
710 *
711 * This function allocates a binding table. This is a bit more complicated
712 * than one would think due to a combination of Vulkan driver design and some
713 * unfortunate hardware restrictions.
714 *
715 * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
716 * the binding table pointer which means that all binding tables need to live
717 * in the bottom 64k of surface state base address. The way the GL driver has
718 * classically dealt with this restriction is to emit all surface states
719 * on-the-fly into the batch and have a batch buffer smaller than 64k. This
720 * isn't really an option in Vulkan for a couple of reasons:
721 *
722 * 1) In Vulkan, we have growing (or chaining) batches so surface states have
723 * to live in their own buffer and we have to be able to re-emit
724 * STATE_BASE_ADDRESS as needed which requires a full pipeline stall. In
725 * order to avoid emitting STATE_BASE_ADDRESS any more often than needed
726 * (it's not that hard to hit 64k of just binding tables), we allocate
727 * surface state objects up-front when VkImageView is created. In order
728 * for this to work, surface state objects need to be allocated from a
729 * global buffer.
730 *
731 * 2) We tried to design the surface state system in such a way that it's
732 * already ready for bindless texturing. The way bindless texturing works
733 * on our hardware is that you have a big pool of surface state objects
734 * (with its own state base address) and the bindless handles are simply
735 * offsets into that pool. With the architecture we chose, we already
736 * have that pool and it's exactly the same pool that we use for regular
737 * surface states so we should already be ready for bindless.
738 *
739 * 3) For render targets, we need to be able to fill out the surface states
740 * later in vkBeginRenderPass so that we can assign clear colors
741 * correctly. One way to do this would be to just create the surface
742 * state data and then repeatedly copy it into the surface state BO every
743 * time we have to re-emit STATE_BASE_ADDRESS. While this works, it's
744 * rather annoying and just being able to allocate them up-front and
745 * re-use them for the entire render pass.
746 *
747 * While none of these are technically blockers for emitting state on the fly
748 * like we do in GL, the ability to have a single surface state pool
749 * simplifies things greatly. Unfortunately, it comes at a cost...
750 *
751 * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
752 * place the binding tables just anywhere in surface state base address.
753 * Because 64k isn't a whole lot of space, we can't simply restrict the
754 * surface state buffer to 64k, we have to be more clever. The solution we've
755 * chosen is to have a block pool with a maximum size of 2G that starts at
756 * zero and grows in both directions. All surface states are allocated from
757 * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
758 * binding tables from the bottom of the pool (negative offsets). Every time
759 * we allocate a new binding table block, we set surface state base address to
760 * point to the bottom of the binding table block. This way all of the
761 * binding tables in the block are in the bottom 64k of surface state base
762 * address. When we fill out the binding table, we add the distance between
763 * the bottom of our binding table block and zero of the block pool to the
764 * surface state offsets so that they are correct relative to our new surface
765 * state base address at the bottom of the binding table block.
766 *
767 * \see adjust_relocations_from_state_pool()
768 * \see adjust_relocations_to_state_pool()
769 *
770 * \param[in] entries The number of surface state entries the binding
771 * table should be able to hold.
772 *
773 * \param[out] state_offset The offset from surface state base address
774 * where the surface states live. This must be
775 * added to the surface state offset when it is
776 * written into the binding table entry.
777 *
778 * \return An anv_state representing the binding table
779 */
780 struct anv_state
781 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
782 uint32_t entries, uint32_t *state_offset)
783 {
784 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
785
786 uint32_t bt_size = align(entries * 4, 32);
787
788 struct anv_state state = cmd_buffer->bt_next;
789 if (bt_size > state.alloc_size)
790 return (struct anv_state) { 0 };
791
792 state.alloc_size = bt_size;
793 cmd_buffer->bt_next.offset += bt_size;
794 cmd_buffer->bt_next.map += bt_size;
795 cmd_buffer->bt_next.alloc_size -= bt_size;
796
797 assert(bt_block->offset < 0);
798 *state_offset = -bt_block->offset;
799
800 return state;
801 }
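
/* A rough sketch of the typical caller pattern, for illustration only
 * (`entries` and `offset` stand in for the caller's values, and the real
 * genX code also re-emits STATE_BASE_ADDRESS after switching to a new block
 * so that the new block becomes the bottom 64k):
 *
 *    struct anv_state bt =
 *       anv_cmd_buffer_alloc_binding_table(cmd_buffer, entries, &offset);
 *    if (bt.map == NULL) {
 *       VkResult result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
 *       if (result != VK_SUCCESS)
 *          return result;
 *       // ... re-emit STATE_BASE_ADDRESS here ...
 *       bt = anv_cmd_buffer_alloc_binding_table(cmd_buffer, entries, &offset);
 *       assert(bt.map != NULL);
 *    }
 */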
802
803 struct anv_state
804 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
805 {
806 struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
807 return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
808 isl_dev->ss.size, isl_dev->ss.align);
809 }
810
811 struct anv_state
812 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
813 uint32_t size, uint32_t alignment)
814 {
815 return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
816 size, alignment);
817 }
818
819 VkResult
820 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
821 {
822 struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
823 if (bt_block == NULL) {
824 anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
825 return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
826 }
827
828 *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
829
830 /* The bt_next state is a rolling state (we update it as we suballocate
831 * from it) which is relative to the start of the binding table block.
832 */
833 cmd_buffer->bt_next = *bt_block;
834 cmd_buffer->bt_next.offset = 0;
835
836 return VK_SUCCESS;
837 }
838
839 VkResult
840 anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
841 {
842 struct anv_batch_bo *batch_bo = NULL;
843 VkResult result;
844
845 list_inithead(&cmd_buffer->batch_bos);
846
847 cmd_buffer->total_batch_size = ANV_MIN_CMD_BUFFER_BATCH_SIZE;
848
849 result = anv_batch_bo_create(cmd_buffer,
850 cmd_buffer->total_batch_size,
851 &batch_bo);
852 if (result != VK_SUCCESS)
853 return result;
854
855 list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
856
857 cmd_buffer->batch.alloc = &cmd_buffer->vk.pool->alloc;
858 cmd_buffer->batch.user_data = cmd_buffer;
859
860 if (cmd_buffer->device->can_chain_batches) {
861 cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
862 } else {
863 cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
864 }
865
866 anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
867 GFX8_MI_BATCH_BUFFER_START_length * 4);
868
869 int success = u_vector_init_pow2(&cmd_buffer->seen_bbos, 8,
870 sizeof(struct anv_bo *));
871 if (!success)
872 goto fail_batch_bo;
873
874 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
875
876 success = u_vector_init(&cmd_buffer->bt_block_states, 8,
877 sizeof(struct anv_state));
878 if (!success)
879 goto fail_seen_bbos;
880
881 result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
882 &cmd_buffer->vk.pool->alloc);
883 if (result != VK_SUCCESS)
884 goto fail_bt_blocks;
885 cmd_buffer->last_ss_pool_center = 0;
886
887 result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
888 if (result != VK_SUCCESS)
889 goto fail_bt_blocks;
890
891 return VK_SUCCESS;
892
893 fail_bt_blocks:
894 u_vector_finish(&cmd_buffer->bt_block_states);
895 fail_seen_bbos:
896 u_vector_finish(&cmd_buffer->seen_bbos);
897 fail_batch_bo:
898 anv_batch_bo_destroy(batch_bo, cmd_buffer);
899
900 return result;
901 }
902
903 void
904 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
905 {
906 struct anv_state *bt_block;
907 u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
908 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
909 u_vector_finish(&cmd_buffer->bt_block_states);
910
911 anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->vk.pool->alloc);
912
913 u_vector_finish(&cmd_buffer->seen_bbos);
914
915 /* Destroy all of the batch buffers */
916 list_for_each_entry_safe(struct anv_batch_bo, bbo,
917 &cmd_buffer->batch_bos, link) {
918 list_del(&bbo->link);
919 anv_batch_bo_destroy(bbo, cmd_buffer);
920 }
921 }
922
923 void
924 anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
925 {
926 /* Delete all but the first batch bo */
927 assert(!list_is_empty(&cmd_buffer->batch_bos));
928 while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
929 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
930 list_del(&bbo->link);
931 anv_batch_bo_destroy(bbo, cmd_buffer);
932 }
933 assert(!list_is_empty(&cmd_buffer->batch_bos));
934
935 anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
936 &cmd_buffer->batch,
937 GFX8_MI_BATCH_BUFFER_START_length * 4);
938
939 while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
940 struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
941 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
942 }
943 assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
944 cmd_buffer->bt_next = *(struct anv_state *)u_vector_head(&cmd_buffer->bt_block_states);
945 cmd_buffer->bt_next.offset = 0;
946
947 anv_reloc_list_clear(&cmd_buffer->surface_relocs);
948 cmd_buffer->last_ss_pool_center = 0;
949
950 /* Reset the list of seen buffers */
951 cmd_buffer->seen_bbos.head = 0;
952 cmd_buffer->seen_bbos.tail = 0;
953
954 struct anv_batch_bo *first_bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
955
956 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = first_bbo;
957
958
959 assert(!cmd_buffer->device->can_chain_batches ||
960 first_bbo->bo->size == ANV_MIN_CMD_BUFFER_BATCH_SIZE);
961 cmd_buffer->total_batch_size = first_bbo->bo->size;
962 }
963
964 void
965 anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
966 {
967 struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
968
969 if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
970 /* When we start a batch buffer, we subtract a certain amount of
971 * padding from the end to ensure that we always have room to emit a
972 * BATCH_BUFFER_START to chain to the next BO. We need to remove
973 * that padding before we end the batch; otherwise, we may end up
974 * with our BATCH_BUFFER_END in another BO.
975 */
976 cmd_buffer->batch.end += GFX8_MI_BATCH_BUFFER_START_length * 4;
977 assert(cmd_buffer->batch.start == batch_bo->bo->map);
978 assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
979
980 /* Save end instruction location to override it later. */
981 cmd_buffer->batch_end = cmd_buffer->batch.next;
982
983 /* If we can chain this command buffer to another one, leave some place
984 * for the jump instruction.
985 */
986 batch_bo->chained = anv_cmd_buffer_is_chainable(cmd_buffer);
987 if (batch_bo->chained)
988 emit_batch_buffer_start(cmd_buffer, batch_bo->bo, 0);
989 else
990 anv_batch_emit(&cmd_buffer->batch, GFX8_MI_BATCH_BUFFER_END, bbe);
991
992 /* Round batch up to an even number of dwords. */
993 if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
994 anv_batch_emit(&cmd_buffer->batch, GFX8_MI_NOOP, noop);
995
996 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
997 } else {
998 assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
999 /* If this is a secondary command buffer, we need to determine the
1000 * mode in which it will be executed with vkExecuteCommands. We
1001 * determine this statically here so that this stays in sync with the
1002 * actual ExecuteCommands implementation.
1003 */
1004 const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
1005 if (!cmd_buffer->device->can_chain_batches) {
1006 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
1007 } else if (cmd_buffer->device->physical->use_call_secondary) {
1008 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN;
1009 /* If the secondary command buffer begins & ends in the same BO and
1010 * its length is less than the length of CS prefetch, add some NOOP
1011 * instructions so the last MI_BATCH_BUFFER_START is outside the CS
1012 * prefetch.
1013 */
1014 if (cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) {
1015 const struct intel_device_info *devinfo = cmd_buffer->device->info;
1016 const enum intel_engine_class engine_class = cmd_buffer->queue_family->engine_class;
1017 /* Careful to have everything in signed integer. */
1018 int32_t prefetch_len = devinfo->engine_class_prefetch[engine_class];
1019 int batch_len = cmd_buffer->batch.next - cmd_buffer->batch.start;
1020
1021 for (int32_t i = 0; i < (prefetch_len - batch_len); i += 4)
1022 anv_batch_emit(&cmd_buffer->batch, GFX8_MI_NOOP, noop);
1023 }
1024
1025 void *jump_addr =
1026 anv_batch_emitn(&cmd_buffer->batch,
1027 GFX8_MI_BATCH_BUFFER_START_length,
1028 GFX8_MI_BATCH_BUFFER_START,
1029 .AddressSpaceIndicator = ASI_PPGTT,
1030 .SecondLevelBatchBuffer = Firstlevelbatch) +
1031 (GFX8_MI_BATCH_BUFFER_START_BatchBufferStartAddress_start / 8);
1032 cmd_buffer->return_addr = anv_batch_address(&cmd_buffer->batch, jump_addr);
1033
1034 /* The emit above may have caused us to chain batch buffers which
1035 * would mean that batch_bo is no longer valid.
1036 */
1037 batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
1038 } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
1039 (length < ANV_MIN_CMD_BUFFER_BATCH_SIZE / 2)) {
1040 /* If the secondary has exactly one batch buffer in its list *and*
1041 * that batch buffer is less than half of the minimum batch size, we're
1042 * probably better off simply copying it into our batch.
1043 */
1044 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
1045 } else if (!(cmd_buffer->usage_flags &
1046 VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
1047 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
1048
1049 /* In order to chain, we need this command buffer to contain an
1050 * MI_BATCH_BUFFER_START which will jump back to the calling batch.
1051 * It doesn't matter where it points now so long as it has a valid
1052 * relocation. We'll adjust it later as part of the chaining
1053 * process.
1054 *
1055 * We set the end of the batch a little short so we would be sure we
1056 * have room for the chaining command. Since we're about to emit the
1057 * chaining command, let's set it back where it should go.
1058 */
1059 cmd_buffer->batch.end += GFX8_MI_BATCH_BUFFER_START_length * 4;
1060 assert(cmd_buffer->batch.start == batch_bo->bo->map);
1061 assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
1062
1063 emit_batch_buffer_start(cmd_buffer, batch_bo->bo, 0);
1064 assert(cmd_buffer->batch.start == batch_bo->bo->map);
1065 } else {
1066 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
1067 }
1068 }
1069
1070 anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
1071 }
1072
1073 static VkResult
1074 anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
1075 struct list_head *list)
1076 {
1077 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
1078 struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
1079 if (bbo_ptr == NULL)
1080 return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
1081
1082 *bbo_ptr = bbo;
1083 }
1084
1085 return VK_SUCCESS;
1086 }
1087
1088 void
1089 anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
1090 struct anv_cmd_buffer *secondary)
1091 {
1092 anv_measure_add_secondary(primary, secondary);
1093 switch (secondary->exec_mode) {
1094 case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
1095 anv_batch_emit_batch(&primary->batch, &secondary->batch);
1096 break;
1097 case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
1098 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
1099 unsigned length = secondary->batch.end - secondary->batch.start;
1100 anv_batch_bo_grow(primary, bbo, &primary->batch, length,
1101 GFX8_MI_BATCH_BUFFER_START_length * 4);
1102 anv_batch_emit_batch(&primary->batch, &secondary->batch);
1103 break;
1104 }
1105 case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
1106 struct anv_batch_bo *first_bbo =
1107 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1108 struct anv_batch_bo *last_bbo =
1109 list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1110
1111 emit_batch_buffer_start(primary, first_bbo->bo, 0);
1112
1113 struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
1114 assert(primary->batch.start == this_bbo->bo->map);
1115 uint32_t offset = primary->batch.next - primary->batch.start;
1116
1117 /* Make the tail of the secondary point back to right after the
1118 * MI_BATCH_BUFFER_START in the primary batch.
1119 */
1120 anv_batch_bo_link(primary, last_bbo, this_bbo, offset);
1121
1122 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
1123 break;
1124 }
1125 case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
1126 struct list_head copy_list;
1127 VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
1128 secondary,
1129 &copy_list);
1130 if (result != VK_SUCCESS)
1131 return; /* FIXME */
1132
1133 anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
1134
1135 struct anv_batch_bo *first_bbo =
1136 list_first_entry(&copy_list, struct anv_batch_bo, link);
1137 struct anv_batch_bo *last_bbo =
1138 list_last_entry(&copy_list, struct anv_batch_bo, link);
1139
1140 cmd_buffer_chain_to_batch_bo(primary, first_bbo);
1141
1142 list_splicetail(&copy_list, &primary->batch_bos);
1143
1144 anv_batch_bo_continue(last_bbo, &primary->batch,
1145 GFX8_MI_BATCH_BUFFER_START_length * 4);
1146 break;
1147 }
1148 case ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN: {
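/* Call-and-return: the MI_STORE_DATA_IMM emitted below writes, at execution
 * time, a return address into the address field of the secondary's trailing
 * MI_BATCH_BUFFER_START (saved in secondary->return_addr). We then jump
 * into the secondary, and its trailing jump brings us back to the
 * instruction right after our MI_BATCH_BUFFER_START.
 */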
1149 struct anv_batch_bo *first_bbo =
1150 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1151
1152 uint64_t *write_return_addr =
1153 anv_batch_emitn(&primary->batch,
1154 GFX8_MI_STORE_DATA_IMM_length + 1 /* QWord write */,
1155 GFX8_MI_STORE_DATA_IMM,
1156 .Address = secondary->return_addr)
1157 + (GFX8_MI_STORE_DATA_IMM_ImmediateData_start / 8);
1158
1159 emit_batch_buffer_start(primary, first_bbo->bo, 0);
1160
1161 *write_return_addr =
1162 anv_address_physical(anv_batch_address(&primary->batch,
1163 primary->batch.next));
1164
1165 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
1166 break;
1167 }
1168 default:
1169 assert(!"Invalid execution mode");
1170 }
1171
1172 anv_reloc_list_append(&primary->surface_relocs, &primary->vk.pool->alloc,
1173 &secondary->surface_relocs, 0);
1174 }
1175
1176 struct anv_execbuf {
1177 struct drm_i915_gem_execbuffer2 execbuf;
1178
1179 struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
1180
1181 struct drm_i915_gem_exec_object2 * objects;
1182 uint32_t bo_count;
1183 struct anv_bo ** bos;
1184
1185 /* Allocated length of the 'objects' and 'bos' arrays */
1186 uint32_t array_length;
1187
1188 uint32_t syncobj_count;
1189 uint32_t syncobj_array_length;
1190 struct drm_i915_gem_exec_fence * syncobjs;
1191 uint64_t * syncobj_values;
1192
1193 /* List of relocations for surface states, only used with platforms not
1194 * using softpin.
1195 */
1196 void * surface_states_relocs;
1197
1198 uint32_t cmd_buffer_count;
1199 struct anv_query_pool *perf_query_pool;
1200
1201 /* Indicates whether any of the command buffers have relocations. This
1202 * doesn't necessarily mean we'll need the kernel to process them. It
1203 * might be that a previous execbuf has already placed things in the VMA
1204 * and we can make i915 skip the relocations.
1205 */
1206 bool has_relocs;
1207
1208 const VkAllocationCallbacks * alloc;
1209 VkSystemAllocationScope alloc_scope;
1210
1211 int perf_query_pass;
1212 };
1213
1214 static void
1215 anv_execbuf_finish(struct anv_execbuf *exec)
1216 {
1217 vk_free(exec->alloc, exec->syncobjs);
1218 vk_free(exec->alloc, exec->syncobj_values);
1219 vk_free(exec->alloc, exec->surface_states_relocs);
1220 vk_free(exec->alloc, exec->objects);
1221 vk_free(exec->alloc, exec->bos);
1222 }
1223
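/* With I915_EXEC_USE_EXTENSIONS, i915 reuses the otherwise unused
 * cliprects_ptr field as the head of a singly-linked list of
 * i915_user_extension structs. This helper walks to the tail of that list
 * and appends the new extension.
 */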
1224 static void
1225 anv_execbuf_add_ext(struct anv_execbuf *exec,
1226 uint32_t ext_name,
1227 struct i915_user_extension *ext)
1228 {
1229 __u64 *iter = &exec->execbuf.cliprects_ptr;
1230
1231 exec->execbuf.flags |= I915_EXEC_USE_EXTENSIONS;
1232
1233 while (*iter != 0) {
1234 iter = (__u64 *) &((struct i915_user_extension *)(uintptr_t)*iter)->next_extension;
1235 }
1236
1237 ext->name = ext_name;
1238
1239 *iter = (uintptr_t) ext;
1240 }
1241
1242 static VkResult
1243 anv_execbuf_add_bo_bitset(struct anv_device *device,
1244 struct anv_execbuf *exec,
1245 uint32_t dep_words,
1246 BITSET_WORD *deps,
1247 uint32_t extra_flags);
1248
1249 static VkResult
1250 anv_execbuf_add_bo(struct anv_device *device,
1251 struct anv_execbuf *exec,
1252 struct anv_bo *bo,
1253 struct anv_reloc_list *relocs,
1254 uint32_t extra_flags)
1255 {
1256 struct drm_i915_gem_exec_object2 *obj = NULL;
1257
1258 bo = anv_bo_unwrap(bo);
1259
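/* bo->exec_obj_index may be stale (left over from a previous execbuf), so
 * only trust it if it still points back at this BO in our array.
 */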
1260 if (bo->exec_obj_index < exec->bo_count &&
1261 exec->bos[bo->exec_obj_index] == bo)
1262 obj = &exec->objects[bo->exec_obj_index];
1263
1264 if (obj == NULL) {
1265 /* We've never seen this one before. Add it to the list and assign
1266 * an id that we can use later.
1267 */
1268 if (exec->bo_count >= exec->array_length) {
1269 uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
1270
1271 struct drm_i915_gem_exec_object2 *new_objects =
1272 vk_alloc(exec->alloc, new_len * sizeof(*new_objects), 8, exec->alloc_scope);
1273 if (new_objects == NULL)
1274 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1275
1276 struct anv_bo **new_bos =
1277 vk_alloc(exec->alloc, new_len * sizeof(*new_bos), 8, exec->alloc_scope);
1278 if (new_bos == NULL) {
1279 vk_free(exec->alloc, new_objects);
1280 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1281 }
1282
1283 if (exec->objects) {
1284 memcpy(new_objects, exec->objects,
1285 exec->bo_count * sizeof(*new_objects));
1286 memcpy(new_bos, exec->bos,
1287 exec->bo_count * sizeof(*new_bos));
1288 }
1289
1290 vk_free(exec->alloc, exec->objects);
1291 vk_free(exec->alloc, exec->bos);
1292
1293 exec->objects = new_objects;
1294 exec->bos = new_bos;
1295 exec->array_length = new_len;
1296 }
1297
1298 assert(exec->bo_count < exec->array_length);
1299
1300 bo->exec_obj_index = exec->bo_count++;
1301 obj = &exec->objects[bo->exec_obj_index];
1302 exec->bos[bo->exec_obj_index] = bo;
1303
1304 obj->handle = bo->gem_handle;
1305 obj->relocation_count = 0;
1306 obj->relocs_ptr = 0;
1307 obj->alignment = 0;
1308 obj->offset = bo->offset;
1309 obj->flags = bo->flags | extra_flags;
1310 obj->rsvd1 = 0;
1311 obj->rsvd2 = 0;
1312 }
1313
1314 if (extra_flags & EXEC_OBJECT_WRITE) {
1315 obj->flags |= EXEC_OBJECT_WRITE;
1316 obj->flags &= ~EXEC_OBJECT_ASYNC;
1317 }
1318
1319 if (relocs != NULL) {
1320 assert(obj->relocation_count == 0);
1321
1322 if (relocs->num_relocs > 0) {
1323 /* This is the first time we've ever seen a list of relocations for
1324 * this BO. Go ahead and set the relocations and then walk the list
1325 * of relocations and add them all.
1326 */
1327 exec->has_relocs = true;
1328 obj->relocation_count = relocs->num_relocs;
1329 obj->relocs_ptr = (uintptr_t) relocs->relocs;
1330
1331 for (size_t i = 0; i < relocs->num_relocs; i++) {
1332 VkResult result;
1333
1334 /* A quick sanity check on relocations */
1335 assert(relocs->relocs[i].offset < bo->size);
1336 result = anv_execbuf_add_bo(device, exec, relocs->reloc_bos[i],
1337 NULL, extra_flags);
1338 if (result != VK_SUCCESS)
1339 return result;
1340 }
1341 }
1342
1343 return anv_execbuf_add_bo_bitset(device, exec, relocs->dep_words,
1344 relocs->deps, extra_flags);
1345 }
1346
1347 return VK_SUCCESS;
1348 }
1349
1350 /* Add BO dependencies to execbuf */
1351 static VkResult
1352 anv_execbuf_add_bo_bitset(struct anv_device *device,
1353 struct anv_execbuf *exec,
1354 uint32_t dep_words,
1355 BITSET_WORD *deps,
1356 uint32_t extra_flags)
1357 {
1358 for (uint32_t w = 0; w < dep_words; w++) {
1359 BITSET_WORD mask = deps[w];
1360 while (mask) {
1361 int i = u_bit_scan(&mask);
1362 uint32_t gem_handle = w * BITSET_WORDBITS + i;
1363 struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
1364 assert(bo->refcount > 0);
1365 VkResult result =
1366 anv_execbuf_add_bo(device, exec, bo, NULL, extra_flags);
1367 if (result != VK_SUCCESS)
1368 return result;
1369 }
1370 }
1371
1372 return VK_SUCCESS;
1373 }
1374
1375 static void
1376 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
1377 struct anv_reloc_list *list)
1378 {
1379 for (size_t i = 0; i < list->num_relocs; i++) {
1380 list->relocs[i].target_handle =
1381 anv_bo_unwrap(list->reloc_bos[i])->exec_obj_index;
1382 }
1383 }
1384
1385 static void
1386 adjust_relocations_from_state_pool(struct anv_state_pool *pool,
1387 struct anv_reloc_list *relocs,
1388 uint32_t last_pool_center_bo_offset)
1389 {
1390 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1391 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1392
1393 for (size_t i = 0; i < relocs->num_relocs; i++) {
1394 /* All of the relocations from this block pool to other BO's should
1395 * have been emitted relative to the surface block pool center. We
1396 * need to add the center offset to make them relative to the
1397 * beginning of the actual GEM bo.
1398 */
1399 relocs->relocs[i].offset += delta;
1400 }
1401 }
1402
1403 static void
1404 adjust_relocations_to_state_pool(struct anv_state_pool *pool,
1405 struct anv_bo *from_bo,
1406 struct anv_reloc_list *relocs,
1407 uint32_t last_pool_center_bo_offset)
1408 {
1409 assert(!from_bo->is_wrapper);
1410 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1411 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1412
1413 /* When we initially emit relocations into a block pool, we don't
1414 * actually know what the final center_bo_offset will be so we just emit
1415 * it as if center_bo_offset == 0. Now that we know what the center
1416 * offset is, we need to walk the list of relocations and adjust any
1417 * relocations that point to the pool bo with the correct offset.
1418 */
1419 for (size_t i = 0; i < relocs->num_relocs; i++) {
1420 if (relocs->reloc_bos[i] == pool->block_pool.bo) {
1421 /* Adjust the delta value in the relocation to correctly
1422 * correspond to the new delta. Initially, this value may have
1423 * been negative (if treated as unsigned), but we trust in
1424 * uint32_t roll-over to fix that for us at this point.
1425 */
1426 relocs->relocs[i].delta += delta;
1427
1428 /* Since the delta has changed, we need to update the actual
1429 * relocated value with the new presumed value. This function
1430 * should only be called on batch buffers, so we know it isn't in
1431 * use by the GPU at the moment.
1432 */
1433 assert(relocs->relocs[i].offset < from_bo->size);
1434 write_reloc(pool->block_pool.device,
1435 from_bo->map + relocs->relocs[i].offset,
1436 relocs->relocs[i].presumed_offset +
1437 relocs->relocs[i].delta, false);
1438 }
1439 }
1440 }
1441
1442 static void
1443 anv_reloc_list_apply(struct anv_device *device,
1444 struct anv_reloc_list *list,
1445 struct anv_bo *bo,
1446 bool always_relocate)
1447 {
1448 bo = anv_bo_unwrap(bo);
1449
1450 for (size_t i = 0; i < list->num_relocs; i++) {
1451 struct anv_bo *target_bo = anv_bo_unwrap(list->reloc_bos[i]);
1452 if (list->relocs[i].presumed_offset == target_bo->offset &&
1453 !always_relocate)
1454 continue;
1455
1456 void *p = bo->map + list->relocs[i].offset;
1457 write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
1458 list->relocs[i].presumed_offset = target_bo->offset;
1459 }
1460 }
1461
1462 /**
1463 * This function applies the relocation for a command buffer and writes the
1464 * actual addresses into the buffers as per what we were told by the kernel on
1465 * the previous execbuf2 call. This should be safe to do because, for each
1466 * relocated address, we have two cases:
1467 *
1468 * 1) The target BO is inactive (as seen by the kernel). In this case, it is
1469 * not in use by the GPU so updating the address is 100% ok. It won't be
1470 * in-use by the GPU (from our context) again until the next execbuf2
1471 * happens. If the kernel decides to move it in the next execbuf2, it
1472 * will have to do the relocations itself, but that's ok because it should
1473 * have all of the information needed to do so.
1474 *
1475 * 2) The target BO is active (as seen by the kernel). In this case, it
1476 * hasn't moved since the last execbuffer2 call because GTT shuffling
1477 * *only* happens when the BO is idle. (From our perspective, it only
1478 * happens inside the execbuffer2 ioctl, but the shuffling may be
1479 * triggered by another ioctl, with full-ppgtt this is limited to only
1480 * execbuffer2 ioctls on the same context, or memory pressure.) Since the
1481 * target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
1482 * address and the relocated value we are writing into the BO will be the
1483 * same as the value that is already there.
1484 *
1485 * There is also a possibility that the target BO is active but the exact
1486 * RENDER_SURFACE_STATE object we are writing the relocation into isn't in
1487 * use. In this case, the address currently in the RENDER_SURFACE_STATE
1488 * may be stale but it's still safe to write the relocation because that
1489 * particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
1490 * won't be until the next execbuf2 call.
1491 *
1492 * By doing relocations on the CPU, we can tell the kernel that it doesn't
1493 * need to bother. We want to do this because the surface state buffer is
1494 * used by every command buffer so, if the kernel does the relocations, it
1495 * will always be busy and the kernel will always stall. This is also
1496 * probably the fastest mechanism for doing relocations since the kernel would
1497 * have to make a full copy of all the relocation lists.
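*
* (For debugging, the userspace relocation path can be disabled by running
* an application with ANV_USERSPACE_RELOCS=false; see
* execbuf_can_skip_relocations() below.)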
1498 */
1499 static bool
1500 execbuf_can_skip_relocations(struct anv_execbuf *exec)
1501 {
1502 if (!exec->has_relocs)
1503 return true;
1504
1505 static int userspace_relocs = -1;
1506 if (userspace_relocs < 0)
1507 userspace_relocs = debug_get_bool_option("ANV_USERSPACE_RELOCS", true);
1508 if (!userspace_relocs)
1509 return false;
1510
1511 /* First, we have to check to see whether or not we can even do the
1512 * relocation. New buffers which have never been submitted to the kernel
1513 * don't have a valid offset so we need to let the kernel do relocations so
1514 * that we can get offsets for them. On future execbuf2 calls, those
1515 * buffers will have offsets and we will be able to skip relocating.
1516 * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
1517 */
1518 for (uint32_t i = 0; i < exec->bo_count; i++) {
1519 assert(!exec->bos[i]->is_wrapper);
1520 if (exec->bos[i]->offset == (uint64_t)-1)
1521 return false;
1522 }
1523
1524 return true;
1525 }
1526
1527 static void
1528 relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
1529 struct anv_execbuf *exec)
1530 {
1531 /* Since surface states are shared between command buffers and we don't
1532 * know what order they will be submitted to the kernel, we don't know
1533 * what address is actually written in the surface state object at any
1534 * given time. The only option is to always relocate them.
1535 */
1536 struct anv_bo *surface_state_bo =
1537 anv_bo_unwrap(cmd_buffer->device->surface_state_pool.block_pool.bo);
1538 anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
1539 surface_state_bo,
1540 true /* always relocate surface states */);
1541
1542 /* Since we own all of the batch buffers, we know what values are stored
1543 * in the relocated addresses and only have to update them if the offsets
1544 * have changed.
1545 */
1546 struct anv_batch_bo **bbo;
1547 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1548 anv_reloc_list_apply(cmd_buffer->device,
1549 &(*bbo)->relocs, (*bbo)->bo, false);
1550 }
1551
1552 for (uint32_t i = 0; i < exec->bo_count; i++)
1553 exec->objects[i].offset = exec->bos[i]->offset;
1554 }
1555
1556 static void
1557 reset_cmd_buffer_surface_offsets(struct anv_cmd_buffer *cmd_buffer)
1558 {
1559 /* In the case where we fall back to doing kernel relocations, we need to
1560 * ensure that the relocation list is valid. All relocations on the batch
1561 * buffers are already valid and kept up-to-date. Since surface states are
1562 * shared between command buffers and we don't know what order they will be
1563 * submitted to the kernel, we don't know what address is actually written
1564 * in the surface state object at any given time. The only option is to set
1565 * a bogus presumed offset and let the kernel relocate them.
1566 */
1567 for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
1568 cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
1569 }
1570
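/* Append a DRM syncobj to the execbuf's fence array, growing the array as
* needed. A parallel array of 64-bit values is allocated lazily the first
* time a non-zero timeline value is added; binary syncobjs keep a value of
* 0.
*/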
1571 static VkResult
1572 anv_execbuf_add_syncobj(struct anv_device *device,
1573 struct anv_execbuf *exec,
1574 uint32_t syncobj,
1575 uint32_t flags,
1576 uint64_t timeline_value)
1577 {
1578 if (exec->syncobj_count >= exec->syncobj_array_length) {
1579 uint32_t new_len = MAX2(exec->syncobj_array_length * 2, 16);
1580
1581 struct drm_i915_gem_exec_fence *new_syncobjs =
1582 vk_alloc(exec->alloc, new_len * sizeof(*new_syncobjs),
1583 8, exec->alloc_scope);
1584 if (!new_syncobjs)
1585 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1586
1587 if (exec->syncobjs)
1588 typed_memcpy(new_syncobjs, exec->syncobjs, exec->syncobj_count);
1589
1590 exec->syncobjs = new_syncobjs;
1591
1592 if (exec->syncobj_values) {
1593 uint64_t *new_syncobj_values =
1594 vk_alloc(exec->alloc, new_len * sizeof(*new_syncobj_values),
1595 8, exec->alloc_scope);
1596 if (!new_syncobj_values)
1597 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1598
1599 typed_memcpy(new_syncobj_values, exec->syncobj_values,
1600 exec->syncobj_count);
1601
1602 exec->syncobj_values = new_syncobj_values;
1603 }
1604
1605 exec->syncobj_array_length = new_len;
1606 }
1607
1608 if (timeline_value && !exec->syncobj_values) {
1609 exec->syncobj_values =
1610 vk_zalloc(exec->alloc, exec->syncobj_array_length *
1611 sizeof(*exec->syncobj_values),
1612 8, exec->alloc_scope);
1613 if (!exec->syncobj_values)
1614 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1615 }
1616
1617 exec->syncobjs[exec->syncobj_count] = (struct drm_i915_gem_exec_fence) {
1618 .handle = syncobj,
1619 .flags = flags,
1620 };
1621 if (exec->syncobj_values)
1622 exec->syncobj_values[exec->syncobj_count] = timeline_value;
1623
1624 exec->syncobj_count++;
1625
1626 return VK_SUCCESS;
1627 }
1628
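/* Translate a vk_sync into something execbuf2 understands: a BO fence
* (anv_bo_sync) is added to the validation list, while a DRM syncobj is
* added to the fence array with the appropriate wait/signal flag. Typical
* usage, as in anv_queue_exec_locked() below:
*
*    anv_execbuf_add_sync(device, &execbuf, waits[i].sync,
*                         false, waits[i].wait_value);
*/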
1629 static VkResult
1630 anv_execbuf_add_sync(struct anv_device *device,
1631 struct anv_execbuf *execbuf,
1632 struct vk_sync *sync,
1633 bool is_signal,
1634 uint64_t value)
1635 {
1636 /* It's illegal to signal a timeline with value 0 because that's never
1637 * higher than the current value. A timeline wait on value 0 is always
1638 * trivially satisfied because any uint64_t value is >= 0.
1639 */
1640 if ((sync->flags & VK_SYNC_IS_TIMELINE) && value == 0)
1641 return VK_SUCCESS;
1642
1643 if (vk_sync_is_anv_bo_sync(sync)) {
1644 struct anv_bo_sync *bo_sync =
1645 container_of(sync, struct anv_bo_sync, sync);
1646
1647 assert(is_signal == (bo_sync->state == ANV_BO_SYNC_STATE_RESET));
1648
1649 return anv_execbuf_add_bo(device, execbuf, bo_sync->bo, NULL,
1650 is_signal ? EXEC_OBJECT_WRITE : 0);
1651 } else if (vk_sync_type_is_drm_syncobj(sync->type)) {
1652 struct vk_drm_syncobj *syncobj = vk_sync_as_drm_syncobj(sync);
1653
1654 if (!(sync->flags & VK_SYNC_IS_TIMELINE))
1655 value = 0;
1656
1657 return anv_execbuf_add_syncobj(device, execbuf, syncobj->syncobj,
1658 is_signal ? I915_EXEC_FENCE_SIGNAL :
1659 I915_EXEC_FENCE_WAIT,
1660 value);
1661 }
1662
1663 unreachable("Invalid sync type");
1664 }
1665
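/* Add a single command buffer's BOs to the execbuf: its surface state
* dependencies (either via relocations on the surface state pool BO in the
* relocation case, or via the dependency bitset when using softpin) and
* every batch BO it has seen, adjusting batch relocations against the
* surface state pool as we go.
*/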
1666 static VkResult
1667 setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
1668 struct anv_cmd_buffer *cmd_buffer)
1669 {
1670 struct anv_state_pool *ss_pool =
1671 &cmd_buffer->device->surface_state_pool;
1672
1673 adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
1674 cmd_buffer->last_ss_pool_center);
1675 VkResult result;
1676 if (anv_use_relocations(cmd_buffer->device->physical)) {
1677 /* Since we aren't in the softpin case, all of our STATE_BASE_ADDRESS BOs
1678 * will get added automatically by processing relocations on the batch
1679 * buffer. We have to add the surface state BO manually because it has
1680 * relocations of its own that we need to be sure are processed.
1681 */
1682 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1683 ss_pool->block_pool.bo,
1684 &cmd_buffer->surface_relocs, 0);
1685 if (result != VK_SUCCESS)
1686 return result;
1687 } else {
1688 /* Add surface dependencies (BOs) to the execbuf */
1689 result = anv_execbuf_add_bo_bitset(cmd_buffer->device, execbuf,
1690 cmd_buffer->surface_relocs.dep_words,
1691 cmd_buffer->surface_relocs.deps, 0);
1692 if (result != VK_SUCCESS)
1693 return result;
1694 }
1695
1696 /* First, we walk over all of the bos we've seen and add them and their
1697 * relocations to the validate list.
1698 */
1699 struct anv_batch_bo **bbo;
1700 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1701 adjust_relocations_to_state_pool(ss_pool, (*bbo)->bo, &(*bbo)->relocs,
1702 cmd_buffer->last_ss_pool_center);
1703
1704 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1705 (*bbo)->bo, &(*bbo)->relocs, 0);
1706 if (result != VK_SUCCESS)
1707 return result;
1708 }
1709
1710 /* Now that we've adjusted all of the surface state relocations, we need to
1711 * record the surface state pool center so future executions of the command
1712 * buffer can adjust correctly.
1713 */
1714 cmd_buffer->last_ss_pool_center = ss_pool->block_pool.center_bo_offset;
1715
1716 return VK_SUCCESS;
1717 }
1718
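/* Rewrite the tails of the batches so that each of the first N-1 command
* buffers jumps into the next one and only the last one actually ends the
* submission. A non-chainable command buffer is left untouched, in which
* case we expect exactly one command buffer.
*/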
1719 static void
1720 chain_command_buffers(struct anv_cmd_buffer **cmd_buffers,
1721 uint32_t num_cmd_buffers)
1722 {
1723 if (!anv_cmd_buffer_is_chainable(cmd_buffers[0])) {
1724 assert(num_cmd_buffers == 1);
1725 return;
1726 }
1727
1728 /* Chain the N-1 first batch buffers */
1729 for (uint32_t i = 0; i < (num_cmd_buffers - 1); i++)
1730 anv_cmd_buffer_record_chain_submit(cmd_buffers[i], cmd_buffers[i + 1]);
1731
1732 /* Put an end to the last one */
1733 anv_cmd_buffer_record_end_submit(cmd_buffers[num_cmd_buffers - 1]);
1734 }
1735
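/* Build the execbuf for a run of command buffers: chain their batches
* together, add every BO they reference (plus, in the softpin case, all of
* the global state pool BOs and user memory objects), apply or reset
* userspace relocations, and move the first batch BO to the end of the
* validation list as the kernel requires.
*/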
1736 static VkResult
1737 setup_execbuf_for_cmd_buffers(struct anv_execbuf *execbuf,
1738 struct anv_queue *queue,
1739 struct anv_cmd_buffer **cmd_buffers,
1740 uint32_t num_cmd_buffers)
1741 {
1742 struct anv_device *device = queue->device;
1743 struct anv_state_pool *ss_pool = &device->surface_state_pool;
1744 VkResult result;
1745
1746 /* Edit the tail of the command buffers to chain them all together if they
1747 * can be.
1748 */
1749 chain_command_buffers(cmd_buffers, num_cmd_buffers);
1750
1751 for (uint32_t i = 0; i < num_cmd_buffers; i++) {
1752 anv_measure_submit(cmd_buffers[i]);
1753 result = setup_execbuf_for_cmd_buffer(execbuf, cmd_buffers[i]);
1754 if (result != VK_SUCCESS)
1755 return result;
1756 }
1757
1758 /* Add all the global BOs to the object list for softpin case. */
1759 if (!anv_use_relocations(device->physical)) {
1760 anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
1761 result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1762 if (result != VK_SUCCESS)
1763 return result;
1764 }
1765
1766 struct anv_block_pool *pool;
1767 pool = &device->dynamic_state_pool.block_pool;
1768 anv_block_pool_foreach_bo(bo, pool) {
1769 result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1770 if (result != VK_SUCCESS)
1771 return result;
1772 }
1773
1774 pool = &device->general_state_pool.block_pool;
1775 anv_block_pool_foreach_bo(bo, pool) {
1776 result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1777 if (result != VK_SUCCESS)
1778 return result;
1779 }
1780
1781 pool = &device->instruction_state_pool.block_pool;
1782 anv_block_pool_foreach_bo(bo, pool) {
1783 result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1784 if (result != VK_SUCCESS)
1785 return result;
1786 }
1787
1788 pool = &device->binding_table_pool.block_pool;
1789 anv_block_pool_foreach_bo(bo, pool) {
1790 result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1791 if (result != VK_SUCCESS)
1792 return result;
1793 }
1794
1795 /* Add the BOs for all user allocated memory objects because we can't
1796 * track which ones are in use after the update-after-bind descriptor updates of VK_EXT_descriptor_indexing.
1797 */
1798 list_for_each_entry(struct anv_device_memory, mem,
1799 &device->memory_objects, link) {
1800 result = anv_execbuf_add_bo(device, execbuf, mem->bo, NULL, 0);
1801 if (result != VK_SUCCESS)
1802 return result;
1803 }
1804 } else {
1805 /* We do not support chaining primary command buffers without
1806 * softpin.
1807 */
1808 assert(num_cmd_buffers == 1);
1809 }
1810
1811 bool no_reloc = true;
1812 if (execbuf->has_relocs) {
1813 no_reloc = execbuf_can_skip_relocations(execbuf);
1814 if (no_reloc) {
1815 /* If we were able to successfully relocate everything, tell the
1816 * kernel that it can skip doing relocations. The requirement for
1817 * using NO_RELOC is:
1818 *
1819 * 1) The addresses written in the objects must match the
1820 * corresponding reloc.presumed_offset which in turn must match
1821 * the corresponding execobject.offset.
1822 *
1823 * 2) To avoid stalling, execobject.offset should match the current
1824 * address of that object within the active context.
1825 *
1826 * In order to satisfy all of the invariants that make userspace
1827 * relocations safe (see relocate_cmd_buffer()), we need to
1828 * further ensure that the addresses we use match those used by the
1829 * kernel for the most recent execbuf2.
1830 *
1831 * The kernel may still choose to do relocations anyway if something
1832 * has moved in the GTT. In this case, the relocation list still
1833 * needs to be valid. All relocations on the batch buffers are
1834 * already valid and kept up-to-date. For surface state relocations,
1835 * by applying the relocations in relocate_cmd_buffer, we ensured
1836 * that the address in the RENDER_SURFACE_STATE matches
1837 * presumed_offset, so it should be safe for the kernel to relocate
1838 * them as needed.
1839 */
1840 for (uint32_t i = 0; i < num_cmd_buffers; i++) {
1841 relocate_cmd_buffer(cmd_buffers[i], execbuf);
1842
1843 anv_reloc_list_apply(device, &cmd_buffers[i]->surface_relocs,
1844 device->surface_state_pool.block_pool.bo,
1845 true /* always relocate surface states */);
1846 }
1847 } else {
1848 /* In the case where we fall back to doing kernel relocations, we
1849 * need to ensure that the relocation list is valid. All relocations
1850 * on the batch buffers are already valid and kept up-to-date. Since
1851 * surface states are shared between command buffers and we don't
1852 * know what order they will be submitted to the kernel, we don't
1853 * know what address is actually written in the surface state object
1854 * at any given time. The only option is to set a bogus presumed
1855 * offset and let the kernel relocate them.
1856 */
1857 for (uint32_t i = 0; i < num_cmd_buffers; i++)
1858 reset_cmd_buffer_surface_offsets(cmd_buffers[i]);
1859 }
1860 }
1861
1862 struct anv_batch_bo *first_batch_bo =
1863 list_first_entry(&cmd_buffers[0]->batch_bos, struct anv_batch_bo, link);
1864
1865 /* The kernel requires that the last entry in the validation list be the
1866 * batch buffer to execute. We can simply swap the element
1867 * corresponding to the first batch_bo in the chain with the last
1868 * element in the list.
1869 */
1870 if (first_batch_bo->bo->exec_obj_index != execbuf->bo_count - 1) {
1871 uint32_t idx = first_batch_bo->bo->exec_obj_index;
1872 uint32_t last_idx = execbuf->bo_count - 1;
1873
1874 struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
1875 assert(execbuf->bos[idx] == first_batch_bo->bo);
1876
1877 execbuf->objects[idx] = execbuf->objects[last_idx];
1878 execbuf->bos[idx] = execbuf->bos[last_idx];
1879 execbuf->bos[idx]->exec_obj_index = idx;
1880
1881 execbuf->objects[last_idx] = tmp_obj;
1882 execbuf->bos[last_idx] = first_batch_bo->bo;
1883 first_batch_bo->bo->exec_obj_index = last_idx;
1884 }
1885
1886 /* If we are pinning our BOs, we shouldn't have to relocate anything */
1887 if (!anv_use_relocations(device->physical))
1888 assert(!execbuf->has_relocs);
1889
1890 /* Now we go through and fixup all of the relocation lists to point to the
1891 * correct indices in the object array (I915_EXEC_HANDLE_LUT). We have to
1892 * do this after we reorder the list above as some of the indices may have
1893 * changed.
1894 */
1895 struct anv_batch_bo **bbo;
1896 if (execbuf->has_relocs) {
1897 assert(num_cmd_buffers == 1);
1898 u_vector_foreach(bbo, &cmd_buffers[0]->seen_bbos)
1899 anv_cmd_buffer_process_relocs(cmd_buffers[0], &(*bbo)->relocs);
1900
1901 anv_cmd_buffer_process_relocs(cmd_buffers[0], &cmd_buffers[0]->surface_relocs);
1902 }
1903
1904 #ifdef SUPPORT_INTEL_INTEGRATED_GPUS
1905 if (device->physical->memory.need_flush) {
1906 __builtin_ia32_mfence();
1907 for (uint32_t i = 0; i < num_cmd_buffers; i++) {
1908 u_vector_foreach(bbo, &cmd_buffers[i]->seen_bbos) {
1909 intel_flush_range_no_fence((*bbo)->bo->map, (*bbo)->length);
1910 }
1911 }
1912 __builtin_ia32_mfence();
1913 }
1914 #endif
1915
1916 struct anv_batch *batch = &cmd_buffers[0]->batch;
1917 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1918 .buffers_ptr = (uintptr_t) execbuf->objects,
1919 .buffer_count = execbuf->bo_count,
1920 .batch_start_offset = 0,
1921 /* On platforms that cannot chain batch buffers because of the i915
1922 * command parser, we have to provide the batch length. Everywhere else
1923 * we'll chain batches so no point in passing a length.
1924 */
1925 .batch_len = device->can_chain_batches ? 0 : batch->next - batch->start,
1926 .cliprects_ptr = 0,
1927 .num_cliprects = 0,
1928 .DR1 = 0,
1929 .DR4 = 0,
1930 .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags | (no_reloc ? I915_EXEC_NO_RELOC : 0),
1931 .rsvd1 = device->context_id,
1932 .rsvd2 = 0,
1933 };
1934
1935 return VK_SUCCESS;
1936 }
1937
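/* Used for submits with no command buffers (e.g. wait/signal only
* submissions): execute the device's trivial batch, which just contains a
* batch-buffer-end plus padding NOOP.
*/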
1938 static VkResult
1939 setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_queue *queue)
1940 {
1941 struct anv_device *device = queue->device;
1942 VkResult result = anv_execbuf_add_bo(device, execbuf,
1943 device->trivial_batch_bo,
1944 NULL, 0);
1945 if (result != VK_SUCCESS)
1946 return result;
1947
1948 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1949 .buffers_ptr = (uintptr_t) execbuf->objects,
1950 .buffer_count = execbuf->bo_count,
1951 .batch_start_offset = 0,
1952 .batch_len = 8, /* GFX7_MI_BATCH_BUFFER_END and NOOP */
1953 .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags | I915_EXEC_NO_RELOC,
1954 .rsvd1 = device->context_id,
1955 .rsvd2 = 0,
1956 };
1957
1958 return VK_SUCCESS;
1959 }
1960
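/* Build an execbuf that runs the utrace copy batch (flush->batch_bo) and
* signals flush->sync when it completes, with the batch BO moved to the
* end of the validation list as the kernel requires.
*/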
1961 static VkResult
1962 setup_utrace_execbuf(struct anv_execbuf *execbuf, struct anv_queue *queue,
1963 struct anv_utrace_flush_copy *flush)
1964 {
1965 struct anv_device *device = queue->device;
1966 VkResult result = anv_execbuf_add_bo(device, execbuf,
1967 flush->batch_bo,
1968 &flush->relocs, 0);
1969 if (result != VK_SUCCESS)
1970 return result;
1971
1972 result = anv_execbuf_add_sync(device, execbuf, flush->sync,
1973 true /* is_signal */, 0 /* value */);
1974 if (result != VK_SUCCESS)
1975 return result;
1976
1977 if (flush->batch_bo->exec_obj_index != execbuf->bo_count - 1) {
1978 uint32_t idx = flush->batch_bo->exec_obj_index;
1979 uint32_t last_idx = execbuf->bo_count - 1;
1980
1981 struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
1982 assert(execbuf->bos[idx] == flush->batch_bo);
1983
1984 execbuf->objects[idx] = execbuf->objects[last_idx];
1985 execbuf->bos[idx] = execbuf->bos[last_idx];
1986 execbuf->bos[idx]->exec_obj_index = idx;
1987
1988 execbuf->objects[last_idx] = tmp_obj;
1989 execbuf->bos[last_idx] = flush->batch_bo;
1990 flush->batch_bo->exec_obj_index = last_idx;
1991 }
1992
1993 #ifdef SUPPORT_INTEL_INTEGRATED_GPUS
1994 if (device->physical->memory.need_flush)
1995 intel_flush_range(flush->batch_bo->map, flush->batch_bo->size);
1996 #endif
1997
1998 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1999 .buffers_ptr = (uintptr_t) execbuf->objects,
2000 .buffer_count = execbuf->bo_count,
2001 .batch_start_offset = 0,
2002 .batch_len = flush->batch.next - flush->batch.start,
2003 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_FENCE_ARRAY | queue->exec_flags |
2004 (execbuf->has_relocs ? 0 : I915_EXEC_NO_RELOC),
2005 .rsvd1 = device->context_id,
2006 .rsvd2 = 0,
2007 .num_cliprects = execbuf->syncobj_count,
2008 .cliprects_ptr = (uintptr_t)execbuf->syncobjs,
2009 };
2010
2011 return VK_SUCCESS;
2012 }
2013
2014 static VkResult
2015 anv_queue_exec_utrace_locked(struct anv_queue *queue,
2016 struct anv_utrace_flush_copy *flush)
2017 {
2018 assert(flush->batch_bo);
2019
2020 struct anv_device *device = queue->device;
2021 struct anv_execbuf execbuf = {
2022 .alloc = &device->vk.alloc,
2023 .alloc_scope = VK_SYSTEM_ALLOCATION_SCOPE_DEVICE,
2024 };
2025
2026 VkResult result = setup_utrace_execbuf(&execbuf, queue, flush);
2027 if (result != VK_SUCCESS)
2028 goto error;
2029
2030 int ret = queue->device->info->no_hw ? 0 :
2031 anv_gem_execbuffer(queue->device, &execbuf.execbuf);
2032 if (ret)
2033 result = vk_queue_set_lost(&queue->vk, "execbuf2 failed: %m");
2034
2035 struct drm_i915_gem_exec_object2 *objects = execbuf.objects;
2036 for (uint32_t k = 0; k < execbuf.bo_count; k++) {
2037 if (anv_bo_is_pinned(execbuf.bos[k]))
2038 assert(execbuf.bos[k]->offset == objects[k].offset);
2039 execbuf.bos[k]->offset = objects[k].offset;
2040 }
2041
2042 error:
2043 anv_execbuf_finish(&execbuf);
2044
2045 return result;
2046 }
2047
2048 /* We lock around execbuf for three main reasons:
2049 *
2050 * 1) When a block pool is resized, we create a new gem handle with a
2051 * different size and, in the case of surface states, possibly a different
2052 * center offset but we re-use the same anv_bo struct when we do so. If
2053 * this happens in the middle of setting up an execbuf, we could end up
2054 * with our list of BOs out of sync with our list of gem handles.
2055 *
2056 * 2) The algorithm we use for building the list of unique buffers isn't
2057 * thread-safe. While the client is supposed to synchronize around
2058 * QueueSubmit, this would be extremely difficult to debug if it ever came
2059 * up in the wild due to a broken app. It's better to play it safe and
2060 * just lock around QueueSubmit.
2061 *
2062 * 3) The anv_cmd_buffer_execbuf function may perform relocations in
2063 * userspace. Due to the fact that the surface state buffer is shared
2064 * between batches, we can't afford to have that happen from multiple
2065 * threads at the same time. Even though the user is supposed to ensure
2066 * this doesn't happen, we play it safe as in (2) above.
2067 *
2068 * Since the only other things that ever take the device lock, such as block
2069 * pool resize, only rarely happen, this will almost never be contended, so
2070 * taking a lock isn't really an expensive operation in this case.
2071 */
2072 static VkResult
2073 anv_queue_exec_locked(struct anv_queue *queue,
2074 uint32_t wait_count,
2075 const struct vk_sync_wait *waits,
2076 uint32_t cmd_buffer_count,
2077 struct anv_cmd_buffer **cmd_buffers,
2078 uint32_t signal_count,
2079 const struct vk_sync_signal *signals,
2080 struct anv_query_pool *perf_query_pool,
2081 uint32_t perf_query_pass)
2082 {
2083 struct anv_device *device = queue->device;
2084 struct anv_utrace_flush_copy *utrace_flush_data = NULL;
2085 struct anv_execbuf execbuf = {
2086 .alloc = &queue->device->vk.alloc,
2087 .alloc_scope = VK_SYSTEM_ALLOCATION_SCOPE_DEVICE,
2088 .perf_query_pass = perf_query_pass,
2089 };
2090
2091 /* Flush the trace points first, they need to be moved */
2092 VkResult result =
2093 anv_device_utrace_flush_cmd_buffers(queue,
2094 cmd_buffer_count,
2095 cmd_buffers,
2096 &utrace_flush_data);
2097 if (result != VK_SUCCESS)
2098 goto error;
2099
2100 if (utrace_flush_data && !utrace_flush_data->batch_bo) {
2101 result = anv_execbuf_add_sync(device, &execbuf,
2102 utrace_flush_data->sync,
2103 true /* is_signal */,
2104 0);
2105 if (result != VK_SUCCESS)
2106 goto error;
2107
2108 utrace_flush_data = NULL;
2109 }
2110
2111 /* Always add the workaround BO as it includes a driver identifier for the
2112 * error_state.
2113 */
2114 result =
2115 anv_execbuf_add_bo(device, &execbuf, device->workaround_bo, NULL, 0);
2116 if (result != VK_SUCCESS)
2117 goto error;
2118
2119 for (uint32_t i = 0; i < wait_count; i++) {
2120 result = anv_execbuf_add_sync(device, &execbuf,
2121 waits[i].sync,
2122 false /* is_signal */,
2123 waits[i].wait_value);
2124 if (result != VK_SUCCESS)
2125 goto error;
2126 }
2127
2128 for (uint32_t i = 0; i < signal_count; i++) {
2129 result = anv_execbuf_add_sync(device, &execbuf,
2130 signals[i].sync,
2131 true /* is_signal */,
2132 signals[i].signal_value);
2133 if (result != VK_SUCCESS)
2134 goto error;
2135 }
2136
2137 if (queue->sync) {
2138 result = anv_execbuf_add_sync(device, &execbuf,
2139 queue->sync,
2140 true /* is_signal */,
2141 0 /* signal_value */);
2142 if (result != VK_SUCCESS)
2143 goto error;
2144 }
2145
2146 if (cmd_buffer_count) {
2147 result = setup_execbuf_for_cmd_buffers(&execbuf, queue,
2148 cmd_buffers,
2149 cmd_buffer_count);
2150 } else {
2151 result = setup_empty_execbuf(&execbuf, queue);
2152 }
2153
2154 if (result != VK_SUCCESS)
2155 goto error;
2156
2157 const bool has_perf_query = perf_query_pool && cmd_buffer_count;
2158
2159 if (INTEL_DEBUG(DEBUG_SUBMIT)) {
2160 fprintf(stderr, "Batch offset=0x%x len=0x%x on queue 0\n",
2161 execbuf.execbuf.batch_start_offset, execbuf.execbuf.batch_len);
2162 for (uint32_t i = 0; i < execbuf.bo_count; i++) {
2163 const struct anv_bo *bo = execbuf.bos[i];
2164
2165 fprintf(stderr, " BO: addr=0x%016"PRIx64"-0x%016"PRIx64" size=0x%010"PRIx64
2166 " handle=%05u name=%s\n",
2167 bo->offset, bo->offset + bo->size - 1, bo->size, bo->gem_handle, bo->name);
2168 }
2169 }
2170
2171 if (INTEL_DEBUG(DEBUG_BATCH)) {
2172 fprintf(stderr, "Batch on queue %d\n", (int)(queue - device->queues));
2173 if (cmd_buffer_count) {
2174 if (has_perf_query) {
2175 struct anv_bo *pass_batch_bo = perf_query_pool->bo;
2176 uint64_t pass_batch_offset =
2177 khr_perf_query_preamble_offset(perf_query_pool, perf_query_pass);
2178
2179 intel_print_batch(&device->decoder_ctx,
2180 pass_batch_bo->map + pass_batch_offset, 64,
2181 pass_batch_bo->offset + pass_batch_offset, false);
2182 }
2183
2184 for (uint32_t i = 0; i < cmd_buffer_count; i++) {
2185 struct anv_batch_bo **bo =
2186 u_vector_tail(&cmd_buffers[i]->seen_bbos);
2187 device->cmd_buffer_being_decoded = cmd_buffers[i];
2188 intel_print_batch(&device->decoder_ctx, (*bo)->bo->map,
2189 (*bo)->bo->size, (*bo)->bo->offset, false);
2190 device->cmd_buffer_being_decoded = NULL;
2191 }
2192 } else {
2193 intel_print_batch(&device->decoder_ctx,
2194 device->trivial_batch_bo->map,
2195 device->trivial_batch_bo->size,
2196 device->trivial_batch_bo->offset, false);
2197 }
2198 }
2199
2200 if (execbuf.syncobj_values) {
2201 execbuf.timeline_fences.fence_count = execbuf.syncobj_count;
2202 execbuf.timeline_fences.handles_ptr = (uintptr_t)execbuf.syncobjs;
2203 execbuf.timeline_fences.values_ptr = (uintptr_t)execbuf.syncobj_values;
2204 anv_execbuf_add_ext(&execbuf,
2205 DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
2206 &execbuf.timeline_fences.base);
2207 } else if (execbuf.syncobjs) {
2208 execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
2209 execbuf.execbuf.num_cliprects = execbuf.syncobj_count;
2210 execbuf.execbuf.cliprects_ptr = (uintptr_t)execbuf.syncobjs;
2211 }
2212
2213 if (has_perf_query) {
2214 assert(perf_query_pass < perf_query_pool->n_passes);
2215 struct intel_perf_query_info *query_info =
2216 perf_query_pool->pass_query[perf_query_pass];
2217
2218 /* Some performance queries only use the pipeline statistics HW, so there
2219 * is no need for OA in that case and no need to reconfigure.
2220 */
2221 if (!INTEL_DEBUG(DEBUG_NO_OACONFIG) &&
2222 (query_info->kind == INTEL_PERF_QUERY_TYPE_OA ||
2223 query_info->kind == INTEL_PERF_QUERY_TYPE_RAW)) {
2224 int ret = intel_perf_stream_set_metrics_id(device->physical->perf,
2225 device->perf_fd,
2226 query_info->oa_metrics_set_id);
2227 if (ret < 0) {
2228 result = vk_device_set_lost(&device->vk,
2229 "i915-perf config failed: %s",
2230 strerror(errno));
2231 }
2232 }
2233
2234 struct anv_bo *pass_batch_bo = perf_query_pool->bo;
2235
2236 struct drm_i915_gem_exec_object2 query_pass_object = {
2237 .handle = pass_batch_bo->gem_handle,
2238 .offset = pass_batch_bo->offset,
2239 .flags = pass_batch_bo->flags,
2240 };
2241 struct drm_i915_gem_execbuffer2 query_pass_execbuf = {
2242 .buffers_ptr = (uintptr_t) &query_pass_object,
2243 .buffer_count = 1,
2244 .batch_start_offset = khr_perf_query_preamble_offset(perf_query_pool,
2245 perf_query_pass),
2246 .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags,
2247 .rsvd1 = device->context_id,
2248 };
2249
2250 int ret = queue->device->info->no_hw ? 0 :
2251 anv_gem_execbuffer(queue->device, &query_pass_execbuf);
2252 if (ret)
2253 result = vk_queue_set_lost(&queue->vk, "execbuf2 failed: %m");
2254 }
2255
2256 int ret = queue->device->info->no_hw ? 0 :
2257 anv_gem_execbuffer(queue->device, &execbuf.execbuf);
2258 if (ret)
2259 result = vk_queue_set_lost(&queue->vk, "execbuf2 failed: %m");
2260
2261 if (result == VK_SUCCESS && queue->sync) {
2262 result = vk_sync_wait(&device->vk, queue->sync, 0,
2263 VK_SYNC_WAIT_COMPLETE, UINT64_MAX);
2264 if (result != VK_SUCCESS)
2265 result = vk_queue_set_lost(&queue->vk, "sync wait failed");
2266 }
2267
2268 struct drm_i915_gem_exec_object2 *objects = execbuf.objects;
2269 for (uint32_t k = 0; k < execbuf.bo_count; k++) {
2270 if (anv_bo_is_pinned(execbuf.bos[k]))
2271 assert(execbuf.bos[k]->offset == objects[k].offset);
2272 execbuf.bos[k]->offset = objects[k].offset;
2273 }
2274
2275 error:
2276 anv_execbuf_finish(&execbuf);
2277
2278 if (result == VK_SUCCESS && utrace_flush_data)
2279 result = anv_queue_exec_utrace_locked(queue, utrace_flush_data);
2280
2281 return result;
2282 }
2283
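/* Two command buffers can only be chained into the same execbuf if they
* use the same performance query pool (or no pool at all).
*/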
2284 static inline bool
2285 can_chain_query_pools(struct anv_query_pool *p1, struct anv_query_pool *p2)
2286 {
2287 return (!p1 || !p2 || p1 == p2);
2288 }
2289
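/* Split the submitted command buffers into runs that can be chained
* together and execute each run with one execbuf. Waits are attached to
* the first run and signals to the last. For example (illustrative),
* submitting chainable command buffers A, B, C where A and B record to two
* different perf query pools results in A being executed on its own and B
* and C (sharing B's pool or using none) being chained into a second
* execbuf.
*/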
2290 static VkResult
2291 anv_queue_submit_locked(struct anv_queue *queue,
2292 struct vk_queue_submit *submit)
2293 {
2294 VkResult result;
2295
2296 if (submit->command_buffer_count == 0) {
2297 result = anv_queue_exec_locked(queue, submit->wait_count, submit->waits,
2298 0 /* cmd_buffer_count */,
2299 NULL /* cmd_buffers */,
2300 submit->signal_count, submit->signals,
2301 NULL /* perf_query_pool */,
2302 0 /* perf_query_pass */);
2303 if (result != VK_SUCCESS)
2304 return result;
2305 } else {
2306 /* Everything's easier if we don't have to bother with container_of() */
2307 STATIC_ASSERT(offsetof(struct anv_cmd_buffer, vk) == 0);
2308 struct vk_command_buffer **vk_cmd_buffers = submit->command_buffers;
2309 struct anv_cmd_buffer **cmd_buffers = (void *)vk_cmd_buffers;
2310 uint32_t start = 0;
2311 uint32_t end = submit->command_buffer_count;
2312 struct anv_query_pool *perf_query_pool =
2313 cmd_buffers[start]->perf_query_pool;
2314 for (uint32_t n = 0; n < end; n++) {
2315 bool can_chain = false;
2316 uint32_t next = n + 1;
2317 /* Can we chain the last buffer into the next one? */
2318 if (next < end &&
2319 anv_cmd_buffer_is_chainable(cmd_buffers[next]) &&
2320 can_chain_query_pools
2321 (cmd_buffers[next]->perf_query_pool, perf_query_pool)) {
2322 can_chain = true;
2323 perf_query_pool =
2324 perf_query_pool ? perf_query_pool :
2325 cmd_buffers[next]->perf_query_pool;
2326 }
2327 if (!can_chain) {
2328 /* The next buffer cannot be chained, or we have reached the
2329 * last buffer; submit what has been chained so far.
2330 */
2331 VkResult result =
2332 anv_queue_exec_locked(queue,
2333 start == 0 ? submit->wait_count : 0,
2334 start == 0 ? submit->waits : NULL,
2335 next - start, &cmd_buffers[start],
2336 next == end ? submit->signal_count : 0,
2337 next == end ? submit->signals : NULL,
2338 perf_query_pool,
2339 submit->perf_pass_index);
2340 if (result != VK_SUCCESS)
2341 return result;
2342 if (next < end) {
2343 start = next;
2344 perf_query_pool = cmd_buffers[start]->perf_query_pool;
2345 }
2346 }
2347 }
2348 }
2349 for (uint32_t i = 0; i < submit->signal_count; i++) {
2350 if (!vk_sync_is_anv_bo_sync(submit->signals[i].sync))
2351 continue;
2352
2353 struct anv_bo_sync *bo_sync =
2354 container_of(submit->signals[i].sync, struct anv_bo_sync, sync);
2355
2356 /* Once the execbuf has returned, we need to set the fence state to
2357 * SUBMITTED. We can't do this before calling execbuf because
2358 * anv_GetFenceStatus does take the global device lock before checking
2359 * fence->state.
2360 *
2361 * We set the fence state to SUBMITTED regardless of whether or not the
2362 * execbuf succeeds because we need to ensure that vkWaitForFences() and
2363 * vkGetFenceStatus() return a valid result (VK_ERROR_DEVICE_LOST or
2364 * VK_SUCCESS) in a finite amount of time even if execbuf fails.
2365 */
2366 assert(bo_sync->state == ANV_BO_SYNC_STATE_RESET);
2367 bo_sync->state = ANV_BO_SYNC_STATE_SUBMITTED;
2368 }
2369
2370 pthread_cond_broadcast(&queue->device->queue_submit);
2371
2372 return VK_SUCCESS;
2373 }
2374
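/* vk_queue submit entry point. With no_hw set we only signal the syncs;
* otherwise the submission happens under the device mutex (see the locking
* rationale above anv_queue_exec_locked()).
*/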
2375 VkResult
2376 anv_queue_submit(struct vk_queue *vk_queue,
2377 struct vk_queue_submit *submit)
2378 {
2379 struct anv_queue *queue = container_of(vk_queue, struct anv_queue, vk);
2380 struct anv_device *device = queue->device;
2381 VkResult result;
2382
2383 if (queue->device->info->no_hw) {
2384 for (uint32_t i = 0; i < submit->signal_count; i++) {
2385 result = vk_sync_signal(&device->vk,
2386 submit->signals[i].sync,
2387 submit->signals[i].signal_value);
2388 if (result != VK_SUCCESS)
2389 return vk_queue_set_lost(&queue->vk, "vk_sync_signal failed");
2390 }
2391 return VK_SUCCESS;
2392 }
2393
2394 uint64_t start_ts = intel_ds_begin_submit(&queue->ds);
2395
2396 pthread_mutex_lock(&device->mutex);
2397 result = anv_queue_submit_locked(queue, submit);
2398 /* Take submission ID under lock */
2399 pthread_mutex_unlock(&device->mutex);
2400
2401 intel_ds_end_submit(&queue->ds, start_ts);
2402
2403 return result;
2404 }
2405
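/* Copy a small batch into a fresh BO from the batch BO pool, execute it
* and wait for completion. Only used during device initialization, so the
* queue is assumed to be idle.
*/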
2406 VkResult
2407 anv_queue_submit_simple_batch(struct anv_queue *queue,
2408 struct anv_batch *batch)
2409 {
2410 struct anv_device *device = queue->device;
2411 VkResult result = VK_SUCCESS;
2412 int err;
2413
2414 if (queue->device->info->no_hw)
2415 return VK_SUCCESS;
2416
2417 /* This is only used by device init so we can assume the queue is empty and
2418 * we aren't fighting with a submit thread.
2419 */
2420 assert(vk_queue_is_empty(&queue->vk));
2421
2422 uint32_t batch_size = align(batch->next - batch->start, 8);
2423
2424 struct anv_bo *batch_bo = NULL;
2425 result = anv_bo_pool_alloc(&device->batch_bo_pool, batch_size, &batch_bo);
2426 if (result != VK_SUCCESS)
2427 return result;
2428
2429 memcpy(batch_bo->map, batch->start, batch_size);
2430 #ifdef SUPPORT_INTEL_INTEGRATED_GPUS
2431 if (device->physical->memory.need_flush)
2432 intel_flush_range(batch_bo->map, batch_size);
2433 #endif
2434
2435 struct anv_execbuf execbuf = {
2436 .alloc = &queue->device->vk.alloc,
2437 .alloc_scope = VK_SYSTEM_ALLOCATION_SCOPE_DEVICE,
2438 };
2439
2440 result = anv_execbuf_add_bo(device, &execbuf, batch_bo, NULL, 0);
2441 if (result != VK_SUCCESS)
2442 goto fail;
2443
2444 if (INTEL_DEBUG(DEBUG_BATCH)) {
2445 intel_print_batch(&device->decoder_ctx,
2446 batch_bo->map,
2447 batch_bo->size,
2448 batch_bo->offset, false);
2449 }
2450
2451 execbuf.execbuf = (struct drm_i915_gem_execbuffer2) {
2452 .buffers_ptr = (uintptr_t) execbuf.objects,
2453 .buffer_count = execbuf.bo_count,
2454 .batch_start_offset = 0,
2455 .batch_len = batch_size,
2456 .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags | I915_EXEC_NO_RELOC,
2457 .rsvd1 = device->context_id,
2458 .rsvd2 = 0,
2459 };
2460
2461 err = anv_gem_execbuffer(device, &execbuf.execbuf);
2462 if (err) {
2463 result = vk_device_set_lost(&device->vk, "anv_gem_execbuffer failed: %m");
2464 goto fail;
2465 }
2466
2467 result = anv_device_wait(device, batch_bo, INT64_MAX);
2468 if (result != VK_SUCCESS) {
2469 result = vk_device_set_lost(&device->vk,
2470 "anv_device_wait failed: %m");
2471 goto fail;
2472 }
2473
2474 fail:
2475 anv_execbuf_finish(&execbuf);
2476 anv_bo_pool_free(&device->batch_bo_pool, batch_bo);
2477
2478 return result;
2479 }
2480