/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "util/os_time.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

#include "ds/intel_tracepoints.h"

#include "anv_internal_kernels.h"
#include "genX_mi_builder.h"

#if GFX_VERx10 >= 125
#define ANV_PIPELINE_STATISTICS_MASK 0x00001fff
#else
#define ANV_PIPELINE_STATISTICS_MASK 0x000007ff
#endif

#include "perf/intel_perf.h"
#include "perf/intel_perf_mdapi.h"
#include "perf/intel_perf_regs.h"

#include "vk_util.h"

static struct anv_address
anv_query_address(struct anv_query_pool *pool, uint32_t query)
{
   return (struct anv_address) {
      .bo = pool->bo,
      .offset = query * pool->stride,
   };
}
62
63 static void
emit_query_mi_flush_availability(struct anv_cmd_buffer *cmd_buffer,
65 struct anv_address addr,
66 bool available)
67 {
68 anv_batch_emit(&cmd_buffer->batch, GENX(MI_FLUSH_DW), flush) {
69 flush.PostSyncOperation = WriteImmediateData;
70 flush.Address = addr;
71 flush.ImmediateData = available;
72 }
73 }
74
VkResult genX(CreateQueryPool)(
76 VkDevice _device,
77 const VkQueryPoolCreateInfo* pCreateInfo,
78 const VkAllocationCallbacks* pAllocator,
79 VkQueryPool* pQueryPool)
80 {
81 ANV_FROM_HANDLE(anv_device, device, _device);
82 const struct anv_physical_device *pdevice = device->physical;
83 const VkQueryPoolPerformanceCreateInfoKHR *perf_query_info = NULL;
84 struct intel_perf_counter_pass *counter_pass;
85 struct intel_perf_query_info **pass_query;
86 uint32_t n_passes = 0;
87 uint32_t data_offset = 0;
88 VK_MULTIALLOC(ma);
89 VkResult result;
90
91 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);
92
/* Query pool slots are made up of some number of 64-bit values packed
 * tightly together. For most query types, the first 64-bit value is the
 * "available" bit, which is 0 when the query is unavailable and 1 when it
 * is available. The 64-bit values that follow are determined by the type
 * of query.
 *
 * For performance queries, OA reports have to be aligned to 64 bytes, so we
 * put those first and place the "available" bit after them, together with
 * some other counters.
 */
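/* For example, an occlusion query slot below ends up as three uint64_t
 * values (availability, begin depth count, end depth count), i.e. a 24-byte
 * stride, while a timestamp slot is two (availability, timestamp).
 */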
103 uint32_t uint64s_per_slot = 0;
104
105 VK_MULTIALLOC_DECL(&ma, struct anv_query_pool, pool, 1);
106
107 VkQueryPipelineStatisticFlags pipeline_statistics = 0;
108 switch (pCreateInfo->queryType) {
109 case VK_QUERY_TYPE_OCCLUSION:
110 /* Occlusion queries have two values: begin and end. */
111 uint64s_per_slot = 1 + 2;
112 break;
113 case VK_QUERY_TYPE_TIMESTAMP:
114 /* Timestamps just have the one timestamp value */
115 uint64s_per_slot = 1 + 1;
116 break;
117 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
118 pipeline_statistics = pCreateInfo->pipelineStatistics;
119 /* We're going to trust this field implicitly so we need to ensure that
120 * no unhandled extension bits leak in.
121 */
122 pipeline_statistics &= ANV_PIPELINE_STATISTICS_MASK;
123
/* Statistics queries have a begin and end value for every statistic */
125 uint64s_per_slot = 1 + 2 * util_bitcount(pipeline_statistics);
126 break;
127 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
/* Transform feedback queries are 4 values, begin/end for primitives
 * written and primitives needed.
 */
131 uint64s_per_slot = 1 + 4;
132 break;
133 case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
134 const struct intel_perf_query_field_layout *layout =
135 &pdevice->perf->query_layout;
136
137 uint64s_per_slot = 2; /* availability + marker */
138 /* Align to the requirement of the layout */
139 uint64s_per_slot = align(uint64s_per_slot,
140 DIV_ROUND_UP(layout->alignment, sizeof(uint64_t)));
141 data_offset = uint64s_per_slot * sizeof(uint64_t);
142 /* Add the query data for begin & end commands */
143 uint64s_per_slot += 2 * DIV_ROUND_UP(layout->size, sizeof(uint64_t));
144 break;
145 }
146 case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
147 const struct intel_perf_query_field_layout *layout =
148 &pdevice->perf->query_layout;
149 const struct anv_queue_family *queue_family;
150
151 perf_query_info = vk_find_struct_const(pCreateInfo->pNext,
152 QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR);
153 /* Same restriction as in EnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR() */
154 queue_family = &pdevice->queue.families[perf_query_info->queueFamilyIndex];
155 if (!queue_family->supports_perf)
156 return vk_error(device, VK_ERROR_UNKNOWN);
157
158 n_passes = intel_perf_get_n_passes(pdevice->perf,
159 perf_query_info->pCounterIndices,
160 perf_query_info->counterIndexCount,
161 NULL);
162 vk_multialloc_add(&ma, &counter_pass, struct intel_perf_counter_pass,
163 perf_query_info->counterIndexCount);
164 vk_multialloc_add(&ma, &pass_query, struct intel_perf_query_info *,
165 n_passes);
166 uint64s_per_slot = 1 /* availability */;
167 /* Align to the requirement of the layout */
168 uint64s_per_slot = align(uint64s_per_slot,
169 DIV_ROUND_UP(layout->alignment, sizeof(uint64_t)));
170 data_offset = uint64s_per_slot * sizeof(uint64_t);
171 /* Add the query data for begin & end commands */
172 uint64s_per_slot += 2 * DIV_ROUND_UP(layout->size, sizeof(uint64_t));
173 /* Multiply by the number of passes */
174 uint64s_per_slot *= n_passes;
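      /* As a worked example with hypothetical layout values (alignment = 64,
       * size = 256 bytes, 2 passes): the single availability qword is padded
       * up to 8 qwords (data_offset = 64 bytes), the begin/end snapshots add
       * 2 * 32 = 64 qwords, giving 72 qwords per pass and 144 qwords (1152
       * bytes) per query slot.
       */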
175 break;
176 }
177 case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
178 /* Query has two values: begin and end. */
179 uint64s_per_slot = 1 + 2;
180 break;
181 #if GFX_VERx10 >= 125
182 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
183 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR:
184 uint64s_per_slot = 1 + 1 /* availability + size (PostbuildInfoCurrentSize, PostbuildInfoCompactedSize) */;
185 break;
186
187 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
188 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR:
189 uint64s_per_slot = 1 + 2 /* availability + size (PostbuildInfoSerializationDesc) */;
190 break;
191
192 case VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT:
193 /* Query has two values: begin and end. */
194 uint64s_per_slot = 1 + 2;
195 break;
196
197 #endif
198 case VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR:
199 uint64s_per_slot = 1;
200 break;
201 case VK_QUERY_TYPE_VIDEO_ENCODE_FEEDBACK_KHR:
202 uint64s_per_slot = 1 + 1; /* availability + length of written bitstream data */
203 break;
204 default:
205 assert(!"Invalid query type");
206 }
207
208 if (!vk_multialloc_zalloc2(&ma, &device->vk.alloc, pAllocator,
209 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
210 return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
211
212 vk_query_pool_init(&device->vk, &pool->vk, pCreateInfo);
213 pool->stride = uint64s_per_slot * sizeof(uint64_t);
214
215 if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL) {
216 pool->data_offset = data_offset;
217 pool->snapshot_size = (pool->stride - data_offset) / 2;
218 }
219 else if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
220 pool->pass_size = pool->stride / n_passes;
221 pool->data_offset = data_offset;
222 pool->snapshot_size = (pool->pass_size - data_offset) / 2;
223 pool->n_counters = perf_query_info->counterIndexCount;
224 pool->counter_pass = counter_pass;
225 intel_perf_get_counters_passes(pdevice->perf,
226 perf_query_info->pCounterIndices,
227 perf_query_info->counterIndexCount,
228 pool->counter_pass);
229 pool->n_passes = n_passes;
230 pool->pass_query = pass_query;
231 intel_perf_get_n_passes(pdevice->perf,
232 perf_query_info->pCounterIndices,
233 perf_query_info->counterIndexCount,
234 pool->pass_query);
235 } else if (pool->vk.query_type == VK_QUERY_TYPE_VIDEO_ENCODE_FEEDBACK_KHR) {
236 const VkVideoProfileInfoKHR* pVideoProfile = vk_find_struct_const(pCreateInfo->pNext, VIDEO_PROFILE_INFO_KHR);
237 assert (pVideoProfile);
238
239 pool->codec = pVideoProfile->videoCodecOperation;
240 }
241
242 uint64_t size = pool->vk.query_count * (uint64_t)pool->stride;
243
244 /* For KHR_performance_query we need some space in the buffer for a small
245 * batch updating ANV_PERF_QUERY_OFFSET_REG.
246 */
247 if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
248 pool->khr_perf_preamble_stride = 32;
249 pool->khr_perf_preambles_offset = size;
250 size += (uint64_t)pool->n_passes * pool->khr_perf_preamble_stride;
251 }
252
253 result = anv_device_alloc_bo(device, "query-pool", size,
254 ANV_BO_ALLOC_MAPPED |
255 ANV_BO_ALLOC_HOST_CACHED_COHERENT |
256 ANV_BO_ALLOC_CAPTURE,
257 0 /* explicit_address */,
258 &pool->bo);
259 if (result != VK_SUCCESS)
260 goto fail;
261
262 if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
263 for (uint32_t p = 0; p < pool->n_passes; p++) {
264 struct mi_builder b;
265 struct anv_batch batch = {
266 .start = pool->bo->map + khr_perf_query_preamble_offset(pool, p),
267 .end = pool->bo->map + khr_perf_query_preamble_offset(pool, p) + pool->khr_perf_preamble_stride,
268 };
269 batch.next = batch.start;
270
271 mi_builder_init(&b, device->info, &batch);
272 mi_store(&b, mi_reg64(ANV_PERF_QUERY_OFFSET_REG),
273 mi_imm(p * (uint64_t)pool->pass_size));
274 anv_batch_emit(&batch, GENX(MI_BATCH_BUFFER_END), bbe);
275 }
276 }
277
278 ANV_RMV(query_pool_create, device, pool, false);
279
280 *pQueryPool = anv_query_pool_to_handle(pool);
281
282 return VK_SUCCESS;
283
284 fail:
285 vk_free2(&device->vk.alloc, pAllocator, pool);
286
287 return result;
288 }
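
/* A minimal sketch of how an application reaches the entry point above,
 * assuming a valid VkDevice handle `device` (illustrative only, not driver
 * code):
 *
 *    VkQueryPoolCreateInfo info = {
 *       .sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
 *       .queryType = VK_QUERY_TYPE_OCCLUSION,
 *       .queryCount = 16,
 *    };
 *    VkQueryPool pool;
 *    VkResult res = vkCreateQueryPool(device, &info, NULL, &pool);
 *
 * The Vulkan dispatch layer routes such a call to genX(CreateQueryPool).
 */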
289
void genX(DestroyQueryPool)(
291 VkDevice _device,
292 VkQueryPool _pool,
293 const VkAllocationCallbacks* pAllocator)
294 {
295 ANV_FROM_HANDLE(anv_device, device, _device);
296 ANV_FROM_HANDLE(anv_query_pool, pool, _pool);
297
298 if (!pool)
299 return;
300
301 ANV_RMV(resource_destroy, device, pool);
302
303 anv_device_release_bo(device, pool->bo);
304 vk_object_free(&device->vk, pAllocator, pool);
305 }
306
307 /**
308 * VK_KHR_performance_query layout :
309 *
310 * --------------------------------------------
311 * | availability (8b) | | |
312 * |-------------------------------| | |
313 * | some padding (see | | |
314 * | query_field_layout:alignment) | | Pass 0 |
315 * |-------------------------------| | |
316 * | query data | | |
317 * | (2 * query_field_layout:size) | | |
318 * |-------------------------------|-- | Query 0
319 * | availability (8b) | | |
320 * |-------------------------------| | |
321 * | some padding (see | | |
322 * | query_field_layout:alignment) | | Pass 1 |
323 * |-------------------------------| | |
324 * | query data | | |
325 * | (2 * query_field_layout:size) | | |
326 * |-------------------------------|-----------
327 * | availability (8b) | | |
328 * |-------------------------------| | |
329 * | some padding (see | | |
330 * | query_field_layout:alignment) | | Pass 0 |
331 * |-------------------------------| | |
332 * | query data | | |
333 * | (2 * query_field_layout:size) | | |
334 * |-------------------------------|-- | Query 1
335 * | ... | | |
336 * --------------------------------------------
337 */
338
339 static uint64_t
khr_perf_query_availability_offset(struct anv_query_pool *pool, uint32_t query, uint32_t pass)
341 {
342 return (query * (uint64_t)pool->stride) + (pass * (uint64_t)pool->pass_size);
343 }
344
345 static uint64_t
khr_perf_query_data_offset(struct anv_query_pool *pool, uint32_t query, uint32_t pass, bool end)
347 {
348 return khr_perf_query_availability_offset(pool, query, pass) +
349 pool->data_offset + (end ? pool->snapshot_size : 0);
350 }
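
/* For example, the end-of-query snapshot for query 2, pass 1 starts at
 * 2 * stride + 1 * pass_size + data_offset + snapshot_size bytes from the
 * start of the pool BO.
 */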
351
352 static struct anv_address
khr_perf_query_availability_address(struct anv_query_pool *pool, uint32_t query, uint32_t pass)
354 {
355 return anv_address_add(
356 (struct anv_address) { .bo = pool->bo, },
357 khr_perf_query_availability_offset(pool, query, pass));
358 }
359
360 static struct anv_address
khr_perf_query_data_address(struct anv_query_pool *pool, uint32_t query, uint32_t pass, bool end)
362 {
363 return anv_address_add(
364 (struct anv_address) { .bo = pool->bo, },
365 khr_perf_query_data_offset(pool, query, pass, end));
366 }
367
368 static bool
khr_perf_query_ensure_relocs(struct anv_cmd_buffer *cmd_buffer)
370 {
371 if (anv_batch_has_error(&cmd_buffer->batch))
372 return false;
373
374 if (cmd_buffer->self_mod_locations)
375 return true;
376
377 struct anv_device *device = cmd_buffer->device;
378 const struct anv_physical_device *pdevice = device->physical;
379
380 cmd_buffer->self_mod_locations =
381 vk_alloc(&cmd_buffer->vk.pool->alloc,
382 pdevice->n_perf_query_commands * sizeof(*cmd_buffer->self_mod_locations), 8,
383 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
384
385 if (!cmd_buffer->self_mod_locations) {
386 anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
387 return false;
388 }
389
390 return true;
391 }
392
393 /**
394 * VK_INTEL_performance_query layout :
395 *
396 * ---------------------------------
397 * | availability (8b) |
398 * |-------------------------------|
399 * | marker (8b) |
400 * |-------------------------------|
401 * | some padding (see |
402 * | query_field_layout:alignment) |
403 * |-------------------------------|
404 * | query data |
405 * | (2 * query_field_layout:size) |
406 * ---------------------------------
407 */
408
409 static uint32_t
intel_perf_marker_offset(void)
411 {
412 return 8;
413 }
414
415 static uint32_t
intel_perf_query_data_offset(struct anv_query_pool *pool, bool end)
417 {
418 return pool->data_offset + (end ? pool->snapshot_size : 0);
419 }
420
421 static void
cpu_write_query_result(void *dst_slot, VkQueryResultFlags flags,
423 uint32_t value_index, uint64_t result)
424 {
425 if (flags & VK_QUERY_RESULT_64_BIT) {
426 uint64_t *dst64 = dst_slot;
427 dst64[value_index] = result;
428 } else {
429 uint32_t *dst32 = dst_slot;
430 dst32[value_index] = result;
431 }
432 }
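
/* For example, with VK_QUERY_RESULT_64_BIT set, value_index 0/1/2 land at
 * byte offsets 0/8/16 of dst_slot; without it they are written as 32-bit
 * values at offsets 0/4/8.
 */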
433
434 static void *
query_slot(struct anv_query_pool *pool, uint32_t query)
436 {
437 return pool->bo->map + query * pool->stride;
438 }
439
440 static bool
query_is_available(struct anv_query_pool *pool, uint32_t query)
442 {
443 if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
444 for (uint32_t p = 0; p < pool->n_passes; p++) {
445 volatile uint64_t *slot =
446 pool->bo->map + khr_perf_query_availability_offset(pool, query, p);
447 if (!slot[0])
448 return false;
449 }
450 return true;
451 }
452
453 return *(volatile uint64_t *)query_slot(pool, query);
454 }
455
456 static VkResult
wait_for_available(struct anv_device *device,
458 struct anv_query_pool *pool, uint32_t query)
459 {
460 /* By default we leave a 2s timeout before declaring the device lost. */
461 uint64_t rel_timeout = 2 * NSEC_PER_SEC;
462 if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
463 /* With performance queries, there is an additional 500us reconfiguration
464 * time in i915.
465 */
466 rel_timeout += 500 * 1000;
467 /* Additionally a command buffer can be replayed N times to gather data
468 * for each of the metric sets to capture all the counters requested.
469 */
470 rel_timeout *= pool->n_passes;
471 }
472 uint64_t abs_timeout_ns = os_time_get_absolute_timeout(rel_timeout);
473
474 while (os_time_get_nano() < abs_timeout_ns) {
475 if (query_is_available(pool, query))
476 return VK_SUCCESS;
477 VkResult status = vk_device_check_status(&device->vk);
478 if (status != VK_SUCCESS)
479 return status;
480 }
481
482 return vk_device_set_lost(&device->vk, "query timeout");
483 }
484
VkResult genX(GetQueryPoolResults)(
486 VkDevice _device,
487 VkQueryPool queryPool,
488 uint32_t firstQuery,
489 uint32_t queryCount,
490 size_t dataSize,
491 void* pData,
492 VkDeviceSize stride,
493 VkQueryResultFlags flags)
494 {
495 ANV_FROM_HANDLE(anv_device, device, _device);
496 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
497
498 assert(
499 #if GFX_VERx10 >= 125
500 pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR ||
501 pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR ||
502 pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR ||
503 pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR ||
504 pool->vk.query_type == VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT ||
505 #endif
506 pool->vk.query_type == VK_QUERY_TYPE_OCCLUSION ||
507 pool->vk.query_type == VK_QUERY_TYPE_PIPELINE_STATISTICS ||
508 pool->vk.query_type == VK_QUERY_TYPE_TIMESTAMP ||
509 pool->vk.query_type == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT ||
510 pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR ||
511 pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL ||
512 pool->vk.query_type == VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT ||
513 pool->vk.query_type == VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR ||
514 pool->vk.query_type == VK_QUERY_TYPE_VIDEO_ENCODE_FEEDBACK_KHR);
515
516 if (vk_device_is_lost(&device->vk))
517 return VK_ERROR_DEVICE_LOST;
518
519 if (pData == NULL)
520 return VK_SUCCESS;
521
522 void *data_end = pData + dataSize;
523
524 VkResult status = VK_SUCCESS;
525 for (uint32_t i = 0; i < queryCount; i++) {
526 bool available = query_is_available(pool, firstQuery + i);
527
528 if (!available && (flags & VK_QUERY_RESULT_WAIT_BIT)) {
529 status = wait_for_available(device, pool, firstQuery + i);
530 if (status != VK_SUCCESS) {
531 return status;
532 }
533
534 available = true;
535 }
536
537 /* From the Vulkan 1.0.42 spec:
538 *
539 * "If VK_QUERY_RESULT_WAIT_BIT and VK_QUERY_RESULT_PARTIAL_BIT are
540 * both not set then no result values are written to pData for
541 * queries that are in the unavailable state at the time of the call,
542 * and vkGetQueryPoolResults returns VK_NOT_READY. However,
543 * availability state is still written to pData for those queries if
544 * VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set."
545 *
546 * From VK_KHR_performance_query :
547 *
548 * "VK_QUERY_RESULT_PERFORMANCE_QUERY_RECORDED_COUNTERS_BIT_KHR specifies
549 * that the result should contain the number of counters that were recorded
550 * into a query pool of type ename:VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR"
551 */
552 bool write_results = available || (flags & VK_QUERY_RESULT_PARTIAL_BIT);
553
554 uint32_t idx = 0;
555 switch (pool->vk.query_type) {
556 case VK_QUERY_TYPE_OCCLUSION:
557 case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
558 #if GFX_VERx10 >= 125
559 case VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT:
560 #endif
561 {
562 uint64_t *slot = query_slot(pool, firstQuery + i);
563 if (write_results) {
564 /* From the Vulkan 1.2.132 spec:
565 *
566 * "If VK_QUERY_RESULT_PARTIAL_BIT is set,
567 * VK_QUERY_RESULT_WAIT_BIT is not set, and the query’s status
568 * is unavailable, an intermediate result value between zero and
569 * the final result value is written to pData for that query."
570 */
571 uint64_t result = available ? slot[2] - slot[1] : 0;
572 cpu_write_query_result(pData, flags, idx, result);
573 }
574 idx++;
575 break;
576 }
577
578 case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
579 uint64_t *slot = query_slot(pool, firstQuery + i);
580 uint32_t statistics = pool->vk.pipeline_statistics;
581 while (statistics) {
582 UNUSED uint32_t stat = u_bit_scan(&statistics);
583 if (write_results) {
584 /* If a query is not available but VK_QUERY_RESULT_PARTIAL_BIT is set, write 0. */
585 uint64_t result = available ? slot[idx * 2 + 2] - slot[idx * 2 + 1] : 0;
586 cpu_write_query_result(pData, flags, idx, result);
587 }
588 idx++;
589 }
590 assert(idx == util_bitcount(pool->vk.pipeline_statistics));
591 break;
592 }
593
594 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: {
595 uint64_t *slot = query_slot(pool, firstQuery + i);
596 if (write_results) {
597 /* If a query is not available but VK_QUERY_RESULT_PARTIAL_BIT is set, write 0. */
598 uint64_t result = available ? slot[2] - slot[1] : 0;
599 cpu_write_query_result(pData, flags, idx, result);
600 }
601 idx++;
602 if (write_results) {
603 /* If a query is not available but VK_QUERY_RESULT_PARTIAL_BIT is set, write 0. */
604 uint64_t result = available ? slot[4] - slot[3] : 0;
605 cpu_write_query_result(pData, flags, idx, result);
606 }
607 idx++;
608 break;
609 }
610
611 #if GFX_VERx10 >= 125
612 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR:
613 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
614 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR: {
615 uint64_t *slot = query_slot(pool, firstQuery + i);
616 if (write_results)
617 cpu_write_query_result(pData, flags, idx, slot[1]);
618 idx++;
619 break;
620 }
621
622 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR: {
623 uint64_t *slot = query_slot(pool, firstQuery + i);
624 if (write_results)
625 cpu_write_query_result(pData, flags, idx, slot[2]);
626 idx++;
627 break;
628 }
629 #endif
630
631 case VK_QUERY_TYPE_TIMESTAMP: {
632 uint64_t *slot = query_slot(pool, firstQuery + i);
633 if (write_results)
634 cpu_write_query_result(pData, flags, idx, slot[1]);
635 idx++;
636 break;
637 }
638
639 case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
640 const struct anv_physical_device *pdevice = device->physical;
641 assert((flags & (VK_QUERY_RESULT_WITH_AVAILABILITY_BIT |
642 VK_QUERY_RESULT_PARTIAL_BIT)) == 0);
643 for (uint32_t p = 0; p < pool->n_passes; p++) {
644 const struct intel_perf_query_info *query = pool->pass_query[p];
645 struct intel_perf_query_result result;
646 intel_perf_query_result_clear(&result);
647 intel_perf_query_result_accumulate_fields(&result, query,
648 pool->bo->map + khr_perf_query_data_offset(pool, firstQuery + i, p, false),
649 pool->bo->map + khr_perf_query_data_offset(pool, firstQuery + i, p, true),
650 false /* no_oa_accumulate */);
651 anv_perf_write_pass_results(pdevice->perf, pool, p, &result, pData);
652 }
653 break;
654 }
655
656 case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
657 if (!write_results)
658 break;
659 const void *query_data = query_slot(pool, firstQuery + i);
660 const struct intel_perf_query_info *query = &device->physical->perf->queries[0];
661 struct intel_perf_query_result result;
662 intel_perf_query_result_clear(&result);
663 intel_perf_query_result_accumulate_fields(&result, query,
664 query_data + intel_perf_query_data_offset(pool, false),
665 query_data + intel_perf_query_data_offset(pool, true),
666 false /* no_oa_accumulate */);
667 intel_perf_query_result_write_mdapi(pData, stride,
668 device->info,
669 query, &result);
670 const uint64_t *marker = query_data + intel_perf_marker_offset();
671 intel_perf_query_mdapi_write_marker(pData, stride, device->info, *marker);
672 break;
673 }
674
675 case VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR:
676 if (!write_results)
677 break;
678 const uint32_t *query_data = query_slot(pool, firstQuery + i);
679 uint32_t result = available ? *query_data : 0;
680 cpu_write_query_result(pData, flags, idx, result);
681 break;
682 case VK_QUERY_TYPE_VIDEO_ENCODE_FEEDBACK_KHR: {
683 if (!write_results)
684 break;
685
686 /*
687 * Slot 0 : Availability.
688 * Slot 1 : Bitstream bytes written.
689 */
690 const uint64_t *slot = query_slot(pool, firstQuery + i);
691 /* Set 0 as offset. */
692 cpu_write_query_result(pData, flags, idx++, 0);
693 cpu_write_query_result(pData, flags, idx++, slot[1]);
694 break;
695 }
696
697 default:
698 unreachable("invalid pool type");
699 }
700
701 if (!write_results)
702 status = VK_NOT_READY;
703
704 if (flags & (VK_QUERY_RESULT_WITH_AVAILABILITY_BIT |
705 VK_QUERY_RESULT_WITH_STATUS_BIT_KHR))
706 cpu_write_query_result(pData, flags, idx, available);
707
708 pData += stride;
709 if (pData >= data_end)
710 break;
711 }
712
713 return status;
714 }
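
/* A minimal host-side usage sketch (illustrative only, assuming valid
 * `device` and `pool` handles and a pool of 16 occlusion queries):
 *
 *    uint64_t results[16];
 *    vkGetQueryPoolResults(device, pool, 0, 16,
 *                          sizeof(results), results, sizeof(uint64_t),
 *                          VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
 *
 * With VK_QUERY_RESULT_WAIT_BIT set, unavailable queries go through
 * wait_for_available() before their results are copied out.
 */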
715
716 static void
emit_ps_depth_count(struct anv_cmd_buffer *cmd_buffer,
718 struct anv_address addr)
719 {
720 cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
721 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
722
723 bool cs_stall_needed = (GFX_VER == 9 && cmd_buffer->device->info->gt == 4);
724 genx_batch_emit_pipe_control_write
725 (&cmd_buffer->batch, cmd_buffer->device->info,
726 cmd_buffer->state.current_pipeline, WritePSDepthCount, addr, 0,
727 ANV_PIPE_DEPTH_STALL_BIT | (cs_stall_needed ? ANV_PIPE_CS_STALL_BIT : 0));
728 }
729
730 static void
emit_query_mi_availability(struct mi_builder *b,
732 struct anv_address addr,
733 bool available)
734 {
735 mi_store(b, mi_mem64(addr), mi_imm(available));
736 }
737
738 static void
emit_query_pc_availability(struct anv_cmd_buffer *cmd_buffer,
740 struct anv_address addr,
741 bool available)
742 {
743 cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
744 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
745
746 genx_batch_emit_pipe_control_write
747 (&cmd_buffer->batch, cmd_buffer->device->info,
748 cmd_buffer->state.current_pipeline, WriteImmediateData, addr,
749 available, 0);
750 }
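
/* Two flavors of availability writes are used here:
 * emit_query_pc_availability() goes through a PIPE_CONTROL post-sync write
 * so it is ordered with the other PIPE_CONTROL-written values (occlusion
 * counts, timestamps), while emit_query_mi_availability() writes straight
 * from the command streamer for queries whose data is also captured with MI
 * commands. Mixing the two for a single query type would require extra
 * synchronization (see the comment in emit_zero_queries() below).
 */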
751
/**
 * Goes through a series of consecutive query indices in the given pool,
 * setting all element values to 0 and marking them as available.
 */
756 static void
emit_zero_queries(struct anv_cmd_buffer *cmd_buffer,
758 struct mi_builder *b, struct anv_query_pool *pool,
759 uint32_t first_index, uint32_t num_queries)
760 {
761 switch (pool->vk.query_type) {
762 case VK_QUERY_TYPE_OCCLUSION:
763 case VK_QUERY_TYPE_TIMESTAMP:
764 /* These queries are written with a PIPE_CONTROL so clear them using the
765 * PIPE_CONTROL as well so we don't have to synchronize between 2 types
766 * of operations.
767 */
768 assert((pool->stride % 8) == 0);
769 for (uint32_t i = 0; i < num_queries; i++) {
770 struct anv_address slot_addr =
771 anv_query_address(pool, first_index + i);
772
773 for (uint32_t qword = 1; qword < (pool->stride / 8); qword++) {
774 emit_query_pc_availability(cmd_buffer,
775 anv_address_add(slot_addr, qword * 8),
776 false);
777 }
778 emit_query_pc_availability(cmd_buffer, slot_addr, true);
779 }
780 break;
781
782 case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
783 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
784 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
785 #if GFX_VERx10 >= 125
786 case VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT:
787 #endif
788 for (uint32_t i = 0; i < num_queries; i++) {
789 struct anv_address slot_addr =
790 anv_query_address(pool, first_index + i);
791 mi_memset(b, anv_address_add(slot_addr, 8), 0, pool->stride - 8);
792 emit_query_mi_availability(b, slot_addr, true);
793 }
794 break;
795
796 case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
797 for (uint32_t i = 0; i < num_queries; i++) {
798 for (uint32_t p = 0; p < pool->n_passes; p++) {
799 mi_memset(b, khr_perf_query_data_address(pool, first_index + i, p, false),
800 0, 2 * pool->snapshot_size);
801 emit_query_mi_availability(b,
802 khr_perf_query_availability_address(pool, first_index + i, p),
803 true);
804 }
805 }
806 break;
807 }
808
809 case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL:
810 for (uint32_t i = 0; i < num_queries; i++) {
811 struct anv_address slot_addr =
812 anv_query_address(pool, first_index + i);
813 mi_memset(b, anv_address_add(slot_addr, 8), 0, pool->stride - 8);
814 emit_query_mi_availability(b, slot_addr, true);
815 }
816 break;
817
818 default:
819 unreachable("Unsupported query type");
820 }
821 }
822
void genX(CmdResetQueryPool)(
824 VkCommandBuffer commandBuffer,
825 VkQueryPool queryPool,
826 uint32_t firstQuery,
827 uint32_t queryCount)
828 {
829 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
830 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
831 struct anv_physical_device *pdevice = cmd_buffer->device->physical;
832
833 /* Shader clearing is only possible on render/compute when not in protected
834 * mode.
835 */
836 if (anv_cmd_buffer_is_render_or_compute_queue(cmd_buffer) &&
837 (cmd_buffer->vk.pool->flags & VK_COMMAND_POOL_CREATE_PROTECTED_BIT) == 0 &&
838 queryCount >= pdevice->instance->query_clear_with_blorp_threshold) {
839 trace_intel_begin_query_clear_blorp(&cmd_buffer->trace);
840
841 anv_cmd_buffer_fill_area(cmd_buffer,
842 anv_query_address(pool, firstQuery),
843 queryCount * pool->stride,
844 0, false);
845
846 /* The pending clearing writes are in compute if we're in gpgpu mode on
847 * the render engine or on the compute engine.
848 */
849 if (anv_cmd_buffer_is_compute_queue(cmd_buffer) ||
850 cmd_buffer->state.current_pipeline == pdevice->gpgpu_pipeline_value) {
851 cmd_buffer->state.queries.clear_bits =
852 ANV_QUERY_COMPUTE_WRITES_PENDING_BITS;
853 } else {
854 cmd_buffer->state.queries.clear_bits =
855 ANV_QUERY_RENDER_TARGET_WRITES_PENDING_BITS(&pdevice->info);
856 }
857
858 trace_intel_end_query_clear_blorp(&cmd_buffer->trace, queryCount);
859 return;
860 }
861
862 trace_intel_begin_query_clear_cs(&cmd_buffer->trace);
863
864 switch (pool->vk.query_type) {
865 case VK_QUERY_TYPE_OCCLUSION:
866 #if GFX_VERx10 >= 125
867 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
868 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
869 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR:
870 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR:
871 #endif
872 for (uint32_t i = 0; i < queryCount; i++) {
873 emit_query_pc_availability(cmd_buffer,
874 anv_query_address(pool, firstQuery + i),
875 false);
876 }
877 break;
878
879 case VK_QUERY_TYPE_TIMESTAMP: {
880 for (uint32_t i = 0; i < queryCount; i++) {
881 emit_query_pc_availability(cmd_buffer,
882 anv_query_address(pool, firstQuery + i),
883 false);
884 }
885
886 /* Add a CS stall here to make sure the PIPE_CONTROL above has
887 * completed. Otherwise some timestamps written later with MI_STORE_*
888 * commands might race with the PIPE_CONTROL in the loop above.
889 */
890 anv_add_pending_pipe_bits(cmd_buffer, ANV_PIPE_CS_STALL_BIT,
891 "vkCmdResetQueryPool of timestamps");
892 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
893 break;
894 }
895
896 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
897 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
898 case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
899 case VK_QUERY_TYPE_VIDEO_ENCODE_FEEDBACK_KHR:
900 #if GFX_VERx10 >= 125
901 case VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT:
902 #endif
903 {
904 struct mi_builder b;
905 mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
906
907 for (uint32_t i = 0; i < queryCount; i++)
908 emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), false);
909 break;
910 }
911
912 case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
913 struct mi_builder b;
914 mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
915
916 for (uint32_t i = 0; i < queryCount; i++) {
917 for (uint32_t p = 0; p < pool->n_passes; p++) {
918 emit_query_mi_availability(
919 &b,
920 khr_perf_query_availability_address(pool, firstQuery + i, p),
921 false);
922 }
923 }
924 break;
925 }
926
927 case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
928 struct mi_builder b;
929 mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
930
931 for (uint32_t i = 0; i < queryCount; i++)
932 emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), false);
933 break;
934 }
935 case VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR:
936 for (uint32_t i = 0; i < queryCount; i++)
937 emit_query_mi_flush_availability(cmd_buffer, anv_query_address(pool, firstQuery + i), false);
938 break;
939 default:
940 unreachable("Unsupported query type");
941 }
942
943 trace_intel_end_query_clear_cs(&cmd_buffer->trace, queryCount);
944 }
945
void genX(ResetQueryPool)(
947 VkDevice _device,
948 VkQueryPool queryPool,
949 uint32_t firstQuery,
950 uint32_t queryCount)
951 {
952 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
953
954 for (uint32_t i = 0; i < queryCount; i++) {
955 if (pool->vk.query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
956 for (uint32_t p = 0; p < pool->n_passes; p++) {
957 uint64_t *pass_slot = pool->bo->map +
958 khr_perf_query_availability_offset(pool, firstQuery + i, p);
959 *pass_slot = 0;
960 }
961 } else {
962 uint64_t *slot = query_slot(pool, firstQuery + i);
963 *slot = 0;
964 }
965 }
966 }
967
968 static const uint32_t vk_pipeline_stat_to_reg[] = {
969 GENX(IA_VERTICES_COUNT_num),
970 GENX(IA_PRIMITIVES_COUNT_num),
971 GENX(VS_INVOCATION_COUNT_num),
972 GENX(GS_INVOCATION_COUNT_num),
973 GENX(GS_PRIMITIVES_COUNT_num),
974 GENX(CL_INVOCATION_COUNT_num),
975 GENX(CL_PRIMITIVES_COUNT_num),
976 GENX(PS_INVOCATION_COUNT_num),
977 GENX(HS_INVOCATION_COUNT_num),
978 GENX(DS_INVOCATION_COUNT_num),
979 GENX(CS_INVOCATION_COUNT_num),
980 #if GFX_VERx10 >= 125
981 GENX(TASK_INVOCATION_COUNT_num),
982 GENX(MESH_INVOCATION_COUNT_num)
983 #endif
984 };
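
/* The table above follows the bit order of VkQueryPipelineStatisticFlagBits;
 * emit_pipeline_stat() relies on this so that the index produced by
 * u_bit_scan() on the enabled-statistics mask selects the matching register.
 */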
985
986 static void
emit_pipeline_stat(struct mi_builder *b, uint32_t stat,
988 struct anv_address addr)
989 {
990 STATIC_ASSERT(ANV_PIPELINE_STATISTICS_MASK ==
991 (1 << ARRAY_SIZE(vk_pipeline_stat_to_reg)) - 1);
992
993 assert(stat < ARRAY_SIZE(vk_pipeline_stat_to_reg));
994 mi_store(b, mi_mem64(addr), mi_reg64(vk_pipeline_stat_to_reg[stat]));
995 }
996
997 static void
emit_xfb_query(struct mi_builder *b, uint32_t stream,
999 struct anv_address addr)
1000 {
1001 assert(stream < MAX_XFB_STREAMS);
1002
1003 mi_store(b, mi_mem64(anv_address_add(addr, 0)),
1004 mi_reg64(GENX(SO_NUM_PRIMS_WRITTEN0_num) + stream * 8));
1005 mi_store(b, mi_mem64(anv_address_add(addr, 16)),
1006 mi_reg64(GENX(SO_PRIM_STORAGE_NEEDED0_num) + stream * 8));
1007 }
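
/* The stores above land at addr + 0 and addr + 16. CmdBeginQuery passes
 * query_addr + 8 and CmdEndQuery passes query_addr + 16, so an XFB slot is
 * laid out as { availability, prims-written begin/end, storage-needed
 * begin/end }, matching the slot[1..4] reads in genX(GetQueryPoolResults)().
 */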
1008
1009 static void
emit_perf_intel_query(struct anv_cmd_buffer *cmd_buffer,
1011 struct anv_query_pool *pool,
1012 struct mi_builder *b,
1013 struct anv_address query_addr,
1014 bool end)
1015 {
1016 const struct intel_perf_query_field_layout *layout =
1017 &cmd_buffer->device->physical->perf->query_layout;
1018 struct anv_address data_addr =
1019 anv_address_add(query_addr, intel_perf_query_data_offset(pool, end));
1020
1021 for (uint32_t f = 0; f < layout->n_fields; f++) {
1022 const struct intel_perf_query_field *field =
1023 &layout->fields[end ? f : (layout->n_fields - 1 - f)];
1024
1025 switch (field->type) {
1026 case INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC:
1027 anv_batch_emit(&cmd_buffer->batch, GENX(MI_REPORT_PERF_COUNT), rpc) {
1028 rpc.MemoryAddress = anv_address_add(data_addr, field->location);
1029 }
1030 break;
1031
1032 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT:
1033 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT:
1034 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A:
1035 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B:
1036 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C:
1037 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_PEC: {
1038 struct anv_address addr = anv_address_add(data_addr, field->location);
1039 struct mi_value src = field->size == 8 ?
1040 mi_reg64(field->mmio_offset) :
1041 mi_reg32(field->mmio_offset);
1042 struct mi_value dst = field->size == 8 ?
1043 mi_mem64(addr) : mi_mem32(addr);
1044 mi_store(b, dst, src);
1045 break;
1046 }
1047
1048 default:
1049 unreachable("Invalid query field");
1050 break;
1051 }
1052 }
1053 }
1054
1055 static void
emit_query_clear_flush(struct anv_cmd_buffer *cmd_buffer,
1057 struct anv_query_pool *pool,
1058 const char *reason)
1059 {
1060 if (cmd_buffer->state.queries.clear_bits == 0)
1061 return;
1062
1063 anv_add_pending_pipe_bits(cmd_buffer,
1064 ANV_PIPE_QUERY_BITS(
1065 cmd_buffer->state.queries.clear_bits),
1066 reason);
1067 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
1068 }
1069
1070
void genX(CmdBeginQueryIndexedEXT)(
1072 VkCommandBuffer commandBuffer,
1073 VkQueryPool queryPool,
1074 uint32_t query,
1075 VkQueryControlFlags flags,
1076 uint32_t index)
1077 {
1078 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1079 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1080 struct anv_address query_addr = anv_query_address(pool, query);
1081
1082 emit_query_clear_flush(cmd_buffer, pool, "CmdBeginQuery* flush query clears");
1083
1084 struct mi_builder b;
1085 mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
1086 const uint32_t mocs = anv_mocs_for_address(cmd_buffer->device, &query_addr);
1087 mi_builder_set_mocs(&b, mocs);
1088
1089 switch (pool->vk.query_type) {
1090 case VK_QUERY_TYPE_OCCLUSION:
1091 cmd_buffer->state.gfx.n_occlusion_queries++;
1092 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_OCCLUSION_QUERY_ACTIVE;
1093 emit_ps_depth_count(cmd_buffer, anv_address_add(query_addr, 8));
1094 break;
1095
1096 case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
1097 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1098 cmd_buffer->device->info,
1099 cmd_buffer->state.current_pipeline,
1100 ANV_PIPE_CS_STALL_BIT |
1101 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1102 mi_store(&b, mi_mem64(anv_address_add(query_addr, 8)),
1103 mi_reg64(GENX(CL_INVOCATION_COUNT_num)));
1104 break;
1105
1106 #if GFX_VERx10 >= 125
1107 case VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT:
1108 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1109 cmd_buffer->device->info,
1110 cmd_buffer->state.current_pipeline,
1111 ANV_PIPE_CS_STALL_BIT |
1112 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1113 mi_store(&b, mi_mem64(anv_address_add(query_addr, 8)),
1114 mi_reg64(GENX(MESH_PRIMITIVE_COUNT_num)));
1115 break;
1116 #endif
1117
1118 case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
1119 /* TODO: This might only be necessary for certain stats */
1120 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1121 cmd_buffer->device->info,
1122 cmd_buffer->state.current_pipeline,
1123 ANV_PIPE_CS_STALL_BIT |
1124 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1125
1126 uint32_t statistics = pool->vk.pipeline_statistics;
1127 uint32_t offset = 8;
1128 while (statistics) {
1129 uint32_t stat = u_bit_scan(&statistics);
1130 emit_pipeline_stat(&b, stat, anv_address_add(query_addr, offset));
1131 offset += 16;
1132 }
1133 break;
1134 }
1135
1136 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1137 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1138 cmd_buffer->device->info,
1139 cmd_buffer->state.current_pipeline,
1140 ANV_PIPE_CS_STALL_BIT |
1141 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1142 emit_xfb_query(&b, index, anv_address_add(query_addr, 8));
1143 break;
1144
1145 case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
1146 if (!khr_perf_query_ensure_relocs(cmd_buffer))
1147 return;
1148
1149 const struct anv_physical_device *pdevice = cmd_buffer->device->physical;
1150 const struct intel_perf_query_field_layout *layout = &pdevice->perf->query_layout;
1151
1152 uint32_t reloc_idx = 0;
1153 for (uint32_t end = 0; end < 2; end++) {
1154 for (uint32_t r = 0; r < layout->n_fields; r++) {
1155 const struct intel_perf_query_field *field =
1156 &layout->fields[end ? r : (layout->n_fields - 1 - r)];
1157 struct mi_value reg_addr =
1158 mi_iadd(
1159 &b,
1160 mi_imm(intel_canonical_address(pool->bo->offset +
1161 khr_perf_query_data_offset(pool, query, 0, end) +
1162 field->location)),
1163 mi_reg64(ANV_PERF_QUERY_OFFSET_REG));
1164 cmd_buffer->self_mod_locations[reloc_idx++] =
1165 mi_store_relocated_address_reg64(&b, reg_addr);
1166
1167 if (field->type != INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC &&
1168 field->size == 8) {
1169 reg_addr =
1170 mi_iadd(
1171 &b,
1172 mi_imm(intel_canonical_address(pool->bo->offset +
1173 khr_perf_query_data_offset(pool, query, 0, end) +
1174 field->location + 4)),
1175 mi_reg64(ANV_PERF_QUERY_OFFSET_REG));
1176 cmd_buffer->self_mod_locations[reloc_idx++] =
1177 mi_store_relocated_address_reg64(&b, reg_addr);
1178 }
1179 }
1180 }
1181
1182 struct mi_value availability_write_offset =
1183 mi_iadd(
1184 &b,
1185 mi_imm(
1186 intel_canonical_address(
1187 pool->bo->offset +
1188 khr_perf_query_availability_offset(pool, query, 0 /* pass */))),
1189 mi_reg64(ANV_PERF_QUERY_OFFSET_REG));
1190 cmd_buffer->self_mod_locations[reloc_idx++] =
1191 mi_store_relocated_address_reg64(&b, availability_write_offset);
1192
1193 assert(reloc_idx == pdevice->n_perf_query_commands);
1194
1195 const struct intel_device_info *devinfo = cmd_buffer->device->info;
1196 const enum intel_engine_class engine_class = cmd_buffer->queue_family->engine_class;
1197 mi_self_mod_barrier(&b, devinfo->engine_class_prefetch[engine_class]);
1198
1199 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1200 cmd_buffer->device->info,
1201 cmd_buffer->state.current_pipeline,
1202 ANV_PIPE_CS_STALL_BIT |
1203 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1204 cmd_buffer->perf_query_pool = pool;
1205
1206 cmd_buffer->perf_reloc_idx = 0;
1207 for (uint32_t r = 0; r < layout->n_fields; r++) {
1208 const struct intel_perf_query_field *field =
1209 &layout->fields[layout->n_fields - 1 - r];
1210 void *dws;
1211
1212 switch (field->type) {
1213 case INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC:
1214 dws = anv_batch_emitn(&cmd_buffer->batch,
1215 GENX(MI_REPORT_PERF_COUNT_length),
1216 GENX(MI_REPORT_PERF_COUNT),
1217 .MemoryAddress = query_addr /* Will be overwritten */);
1218 mi_resolve_relocated_address_token(
1219 &b,
1220 cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1221 dws + GENX(MI_REPORT_PERF_COUNT_MemoryAddress_start) / 8);
1222 break;
1223
1224 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT:
1225 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT:
1226 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A:
1227 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B:
1228 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C:
1229 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_PEC:
1230 dws =
1231 anv_batch_emitn(&cmd_buffer->batch,
1232 GENX(MI_STORE_REGISTER_MEM_length),
1233 GENX(MI_STORE_REGISTER_MEM),
1234 .RegisterAddress = field->mmio_offset,
1235 .MemoryAddress = query_addr /* Will be overwritten */ );
1236 mi_resolve_relocated_address_token(
1237 &b,
1238 cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1239 dws + GENX(MI_STORE_REGISTER_MEM_MemoryAddress_start) / 8);
1240 if (field->size == 8) {
1241 dws =
1242 anv_batch_emitn(&cmd_buffer->batch,
1243 GENX(MI_STORE_REGISTER_MEM_length),
1244 GENX(MI_STORE_REGISTER_MEM),
1245 .RegisterAddress = field->mmio_offset + 4,
1246 .MemoryAddress = query_addr /* Will be overwritten */ );
1247 mi_resolve_relocated_address_token(
1248 &b,
1249 cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1250 dws + GENX(MI_STORE_REGISTER_MEM_MemoryAddress_start) / 8);
1251 }
1252 break;
1253
1254 default:
1255 unreachable("Invalid query field");
1256 break;
1257 }
1258 }
1259 break;
1260 }
1261
1262 case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
1263 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1264 cmd_buffer->device->info,
1265 cmd_buffer->state.current_pipeline,
1266 ANV_PIPE_CS_STALL_BIT |
1267 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1268 emit_perf_intel_query(cmd_buffer, pool, &b, query_addr, false);
1269 break;
1270 }
1271 case VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR:
1272 emit_query_mi_flush_availability(cmd_buffer, query_addr, false);
1273 break;
1274 case VK_QUERY_TYPE_VIDEO_ENCODE_FEEDBACK_KHR:
1275 emit_query_mi_availability(&b, query_addr, false);
1276 break;
1277 default:
1278 unreachable("");
1279 }
1280 }
1281
void genX(CmdEndQueryIndexedEXT)(
1283 VkCommandBuffer commandBuffer,
1284 VkQueryPool queryPool,
1285 uint32_t query,
1286 uint32_t index)
1287 {
1288 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1289 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1290 struct anv_address query_addr = anv_query_address(pool, query);
1291
1292 struct mi_builder b;
1293 mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
1294
1295 switch (pool->vk.query_type) {
1296 case VK_QUERY_TYPE_OCCLUSION:
1297 emit_ps_depth_count(cmd_buffer, anv_address_add(query_addr, 16));
1298 emit_query_pc_availability(cmd_buffer, query_addr, true);
1299 cmd_buffer->state.gfx.n_occlusion_queries--;
1300 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_OCCLUSION_QUERY_ACTIVE;
1301 break;
1302
1303 case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
1304 /* Ensure previous commands have completed before capturing the register
1305 * value.
1306 */
1307 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1308 cmd_buffer->device->info,
1309 cmd_buffer->state.current_pipeline,
1310 ANV_PIPE_CS_STALL_BIT |
1311 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1312
1313 mi_store(&b, mi_mem64(anv_address_add(query_addr, 16)),
1314 mi_reg64(GENX(CL_INVOCATION_COUNT_num)));
1315 emit_query_mi_availability(&b, query_addr, true);
1316 break;
1317
1318 #if GFX_VERx10 >= 125
1319 case VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT:
1320 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1321 cmd_buffer->device->info,
1322 cmd_buffer->state.current_pipeline,
1323 ANV_PIPE_CS_STALL_BIT |
1324 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1325 mi_store(&b, mi_mem64(anv_address_add(query_addr, 16)),
1326 mi_reg64(GENX(MESH_PRIMITIVE_COUNT_num)));
1327 emit_query_mi_availability(&b, query_addr, true);
1328 break;
1329 #endif
1330
1331 case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
1332 /* TODO: This might only be necessary for certain stats */
1333 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1334 cmd_buffer->device->info,
1335 cmd_buffer->state.current_pipeline,
1336 ANV_PIPE_CS_STALL_BIT |
1337 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1338
1339 uint32_t statistics = pool->vk.pipeline_statistics;
1340 uint32_t offset = 16;
1341 while (statistics) {
1342 uint32_t stat = u_bit_scan(&statistics);
1343 emit_pipeline_stat(&b, stat, anv_address_add(query_addr, offset));
1344 offset += 16;
1345 }
1346
1347 emit_query_mi_availability(&b, query_addr, true);
1348 break;
1349 }
1350
1351 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1352 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1353 cmd_buffer->device->info,
1354 cmd_buffer->state.current_pipeline,
1355 ANV_PIPE_CS_STALL_BIT |
1356 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1357 emit_xfb_query(&b, index, anv_address_add(query_addr, 16));
1358 #if GFX_VER == 11
/* Running the following CTS pattern on ICL will likely report a failure:
 *
 * dEQP-VK.transform_feedback.primitives_generated_query.get.queue_reset.32bit.geom.*
 *
 * If you dump the returned values in genX(GetQueryPoolResults)(), you will
 * notice that the last 64-bit value is 0 and that rereading it once more
 * returns a non-zero value. This seems to indicate that the memory writes
 * are somehow not ordered; otherwise the availability write below would
 * guarantee that the previous writes above have completed.
 *
 * So as a workaround, we stall the CS to make sure the previous writes have
 * landed before emitting the availability.
 */
1373 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1374 cmd_buffer->device->info,
1375 cmd_buffer->state.current_pipeline,
1376 ANV_PIPE_CS_STALL_BIT);
1377 #endif
1378 emit_query_mi_availability(&b, query_addr, true);
1379 break;
1380
1381 case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
1382 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1383 cmd_buffer->device->info,
1384 cmd_buffer->state.current_pipeline,
1385 ANV_PIPE_CS_STALL_BIT |
1386 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1387 cmd_buffer->perf_query_pool = pool;
1388
1389 if (!khr_perf_query_ensure_relocs(cmd_buffer))
1390 return;
1391
1392 const struct anv_physical_device *pdevice = cmd_buffer->device->physical;
1393 const struct intel_perf_query_field_layout *layout = &pdevice->perf->query_layout;
1394
1395 void *dws;
1396 for (uint32_t r = 0; r < layout->n_fields; r++) {
1397 const struct intel_perf_query_field *field = &layout->fields[r];
1398
1399 switch (field->type) {
1400 case INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC:
1401 dws = anv_batch_emitn(&cmd_buffer->batch,
1402 GENX(MI_REPORT_PERF_COUNT_length),
1403 GENX(MI_REPORT_PERF_COUNT),
1404 .MemoryAddress = query_addr /* Will be overwritten */);
1405 mi_resolve_relocated_address_token(
1406 &b,
1407 cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1408 dws + GENX(MI_REPORT_PERF_COUNT_MemoryAddress_start) / 8);
1409 break;
1410
1411 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT:
1412 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT:
1413 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A:
1414 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B:
1415 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C:
1416 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_PEC:
1417 dws =
1418 anv_batch_emitn(&cmd_buffer->batch,
1419 GENX(MI_STORE_REGISTER_MEM_length),
1420 GENX(MI_STORE_REGISTER_MEM),
1421 .RegisterAddress = field->mmio_offset,
1422 .MemoryAddress = query_addr /* Will be overwritten */ );
1423 mi_resolve_relocated_address_token(
1424 &b,
1425 cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1426 dws + GENX(MI_STORE_REGISTER_MEM_MemoryAddress_start) / 8);
1427 if (field->size == 8) {
1428 dws =
1429 anv_batch_emitn(&cmd_buffer->batch,
1430 GENX(MI_STORE_REGISTER_MEM_length),
1431 GENX(MI_STORE_REGISTER_MEM),
1432 .RegisterAddress = field->mmio_offset + 4,
1433 .MemoryAddress = query_addr /* Will be overwritten */ );
1434 mi_resolve_relocated_address_token(
1435 &b,
1436 cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1437 dws + GENX(MI_STORE_REGISTER_MEM_MemoryAddress_start) / 8);
1438 }
1439 break;
1440
1441 default:
1442 unreachable("Invalid query field");
1443 break;
1444 }
1445 }
1446
1447 dws =
1448 anv_batch_emitn(&cmd_buffer->batch,
1449 GENX(MI_STORE_DATA_IMM_length),
1450 GENX(MI_STORE_DATA_IMM),
1451 .ImmediateData = true);
1452 mi_resolve_relocated_address_token(
1453 &b,
1454 cmd_buffer->self_mod_locations[cmd_buffer->perf_reloc_idx++],
1455 dws + GENX(MI_STORE_DATA_IMM_Address_start) / 8);
1456
1457 assert(cmd_buffer->perf_reloc_idx == pdevice->n_perf_query_commands);
1458 break;
1459 }
1460
1461 case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
1462 genx_batch_emit_pipe_control(&cmd_buffer->batch,
1463 cmd_buffer->device->info,
1464 cmd_buffer->state.current_pipeline,
1465 ANV_PIPE_CS_STALL_BIT |
1466 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
1467 uint32_t marker_offset = intel_perf_marker_offset();
1468 mi_store(&b, mi_mem64(anv_address_add(query_addr, marker_offset)),
1469 mi_imm(cmd_buffer->intel_perf_marker));
1470 emit_perf_intel_query(cmd_buffer, pool, &b, query_addr, true);
1471 emit_query_mi_availability(&b, query_addr, true);
1472 break;
1473 }
1474 case VK_QUERY_TYPE_RESULT_STATUS_ONLY_KHR:
1475 emit_query_mi_flush_availability(cmd_buffer, query_addr, true);
1476 break;
1477
1478 #if GFX_VER < 11
1479 #define MFC_BITSTREAM_BYTECOUNT_FRAME_REG 0x128A0
1480 #define HCP_BITSTREAM_BYTECOUNT_FRAME_REG 0x1E9A0
1481 #elif GFX_VER >= 11
1482 #define MFC_BITSTREAM_BYTECOUNT_FRAME_REG 0x1C08A0
1483 #define HCP_BITSTREAM_BYTECOUNT_FRAME_REG 0x1C28A0
1484 #endif
1485
1486 case VK_QUERY_TYPE_VIDEO_ENCODE_FEEDBACK_KHR: {
1487 uint32_t reg_addr;
1488
1489 if (pool->codec & VK_VIDEO_CODEC_OPERATION_ENCODE_H264_BIT_KHR) {
1490 reg_addr = MFC_BITSTREAM_BYTECOUNT_FRAME_REG;
1491 } else if (pool->codec & VK_VIDEO_CODEC_OPERATION_ENCODE_H265_BIT_KHR) {
1492 reg_addr = HCP_BITSTREAM_BYTECOUNT_FRAME_REG;
1493 } else {
1494 unreachable("Invalid codec operation");
1495 }
1496
1497 mi_store(&b, mi_mem64(anv_address_add(query_addr, 8)), mi_reg32(reg_addr));
1498 emit_query_mi_availability(&b, query_addr, true);
1499 break;
1500 }
1501 default:
1502 unreachable("");
1503 }
1504
1505 /* When multiview is active the spec requires that N consecutive query
1506 * indices are used, where N is the number of active views in the subpass.
1507 * The spec allows that we only write the results to one of the queries
1508 * but we still need to manage result availability for all the query indices.
1509 * Since we only emit a single query for all active views in the
1510 * first index, mark the other query indices as being already available
1511 * with result 0.
1512 */
1513 if (cmd_buffer->state.gfx.view_mask) {
1514 const uint32_t num_queries =
1515 util_bitcount(cmd_buffer->state.gfx.view_mask);
1516 if (num_queries > 1)
1517 emit_zero_queries(cmd_buffer, &b, pool, query + 1, num_queries - 1);
1518 }
1519 }
1520
1521 #define TIMESTAMP 0x2358
1522
void genX(CmdWriteTimestamp2)(
1524 VkCommandBuffer commandBuffer,
1525 VkPipelineStageFlags2 stage,
1526 VkQueryPool queryPool,
1527 uint32_t query)
1528 {
1529 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1530 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1531 struct anv_address query_addr = anv_query_address(pool, query);
1532
1533 assert(pool->vk.query_type == VK_QUERY_TYPE_TIMESTAMP);
1534
1535 emit_query_clear_flush(cmd_buffer, pool,
1536 "CmdWriteTimestamp flush query clears");
1537
1538 struct mi_builder b;
1539 mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
1540
1541 if (stage == VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT) {
1542 mi_store(&b, mi_mem64(anv_address_add(query_addr, 8)),
1543 mi_reg64(TIMESTAMP));
1544 emit_query_mi_availability(&b, query_addr, true);
1545 } else {
1546 /* Everything else is bottom-of-pipe */
1547 cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
1548 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
1549
1550 bool cs_stall_needed =
1551 (GFX_VER == 9 && cmd_buffer->device->info->gt == 4);
1552
1553 if (anv_cmd_buffer_is_blitter_queue(cmd_buffer) ||
1554 anv_cmd_buffer_is_video_queue(cmd_buffer)) {
1555 /* Wa_16018063123 - emit fast color dummy blit before MI_FLUSH_DW. */
1556 if (intel_needs_workaround(cmd_buffer->device->info, 16018063123)) {
1557 genX(batch_emit_fast_color_dummy_blit)(&cmd_buffer->batch,
1558 cmd_buffer->device);
1559 }
1560 anv_batch_emit(&cmd_buffer->batch, GENX(MI_FLUSH_DW), dw) {
1561 dw.Address = anv_address_add(query_addr, 8);
1562 dw.PostSyncOperation = WriteTimestamp;
1563 }
1564 emit_query_mi_flush_availability(cmd_buffer, query_addr, true);
1565 } else {
1566 genx_batch_emit_pipe_control_write
1567 (&cmd_buffer->batch, cmd_buffer->device->info,
1568 cmd_buffer->state.current_pipeline, WriteTimestamp,
1569 anv_address_add(query_addr, 8), 0,
1570 cs_stall_needed ? ANV_PIPE_CS_STALL_BIT : 0);
1571 emit_query_pc_availability(cmd_buffer, query_addr, true);
1572 }
1573
1574 }
1575
1576
1577 /* When multiview is active the spec requires that N consecutive query
1578 * indices are used, where N is the number of active views in the subpass.
1579 * The spec allows us to write the results to only one of the queries,
1580 * but we still need to manage result availability for all the query indices.
1581 * Since we only emit a single query for all active views in the
1582 * first index, mark the other query indices as being already available
1583 * with result 0.
1584 */
1585 if (cmd_buffer->state.gfx.view_mask) {
1586 const uint32_t num_queries =
1587 util_bitcount(cmd_buffer->state.gfx.view_mask);
1588 if (num_queries > 1)
1589 emit_zero_queries(cmd_buffer, &b, pool, query + 1, num_queries - 1);
1590 }
1591 }
1592
1593 #define MI_PREDICATE_SRC0 0x2400
1594 #define MI_PREDICATE_SRC1 0x2408
1595 #define MI_PREDICATE_RESULT 0x2418
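/* Predicate register offsets used to implement conditional result writes.
 * The pattern in gpu_write_query_result_cond() below is roughly:
 *
 *    SRC0   <- *poll_addr        (MI_LOAD_REGISTER_MEM)
 *    SRC1   <- ref_value         (MI_LOAD_REGISTER_IMM)
 *    RESULT <- (SRC0 == SRC1)    (MI_PREDICATE, COMPARE_SRCS_EQUAL)
 *    if (RESULT) *dst <- value   (predicated store via mi_store_if)
 */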
1596
1597 /**
1598 * Writes the result of a query to dst_addr if the value at poll_addr is equal
1599 * to the reference value.
1600 */
1601 static void
1602 gpu_write_query_result_cond(struct anv_cmd_buffer *cmd_buffer,
1603 struct mi_builder *b,
1604 struct anv_address poll_addr,
1605 struct anv_address dst_addr,
1606 uint64_t ref_value,
1607 VkQueryResultFlags flags,
1608 uint32_t value_index,
1609 struct mi_value query_result)
1610 {
1611 mi_store(b, mi_reg64(MI_PREDICATE_SRC0), mi_mem64(poll_addr));
1612 mi_store(b, mi_reg64(MI_PREDICATE_SRC1), mi_imm(ref_value));
1613 anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
1614 mip.LoadOperation = LOAD_LOAD;
1615 mip.CombineOperation = COMBINE_SET;
1616 mip.CompareOperation = COMPARE_SRCS_EQUAL;
1617 }
1618
1619 if (flags & VK_QUERY_RESULT_64_BIT) {
1620 struct anv_address res_addr = anv_address_add(dst_addr, value_index * 8);
1621 mi_store_if(b, mi_mem64(res_addr), query_result);
1622 } else {
1623 struct anv_address res_addr = anv_address_add(dst_addr, value_index * 4);
1624 mi_store_if(b, mi_mem32(res_addr), query_result);
1625 }
1626 }
1627
1628 static void
1629 gpu_write_query_result(struct mi_builder *b,
1630 struct anv_address dst_addr,
1631 VkQueryResultFlags flags,
1632 uint32_t value_index,
1633 struct mi_value query_result)
1634 {
1635 if (flags & VK_QUERY_RESULT_64_BIT) {
1636 struct anv_address res_addr = anv_address_add(dst_addr, value_index * 8);
1637 mi_store(b, mi_mem64(res_addr), query_result);
1638 } else {
1639 struct anv_address res_addr = anv_address_add(dst_addr, value_index * 4);
1640 mi_store(b, mi_mem32(res_addr), query_result);
1641 }
1642 }
1643
1644 static struct mi_value
1645 compute_query_result(struct mi_builder *b, struct anv_address addr)
1646 {
1647 return mi_isub(b, mi_mem64(anv_address_add(addr, 8)),
1648 mi_mem64(anv_address_add(addr, 0)));
1649 }
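/* Query slots store most counters as a (begin, end) pair of 64-bit values,
 * so a result is simply end - begin. Callers pass the address of the begin
 * value; for example, the occlusion query slot layout is:
 *
 *    [offset 0]  availability
 *    [offset 8]  counter at vkCmdBeginQuery   <- addr points here
 *    [offset 16] counter at vkCmdEndQuery
 */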
1650
1651 static void
1652 copy_query_results_with_cs(struct anv_cmd_buffer *cmd_buffer,
1653 struct anv_query_pool *pool,
1654 struct anv_address dest_addr,
1655 uint64_t dest_stride,
1656 uint32_t first_query,
1657 uint32_t query_count,
1658 VkQueryResultFlags flags)
1659 {
1660 enum anv_pipe_bits needed_flushes = 0;
1661
1662 trace_intel_begin_query_copy_cs(&cmd_buffer->trace);
1663
1664 /* If render target writes are ongoing, request a render target cache flush
1665 * to ensure proper ordering of the commands from the 3d pipe and the
1666 * command streamer.
1667 */
1668
1669 const enum anv_query_bits query_bits =
1670 cmd_buffer->state.queries.buffer_write_bits |
1671 cmd_buffer->state.queries.clear_bits;
1672
1673 needed_flushes |= ANV_PIPE_QUERY_BITS(query_bits);
1674
1675 /* Occlusion & timestamp queries are written using a PIPE_CONTROL, and
1676 * because we're about to copy their values using MI commands, we need to
1677 * stall the command streamer to make sure the PIPE_CONTROL writes have
1678 * landed; otherwise we could read inconsistent values & availability.
1679 *
1680 * From the vulkan spec:
1681 *
1682 * "vkCmdCopyQueryPoolResults is guaranteed to see the effect of
1683 * previous uses of vkCmdResetQueryPool in the same queue, without any
1684 * additional synchronization."
1685 */
1686 if (pool->vk.query_type == VK_QUERY_TYPE_OCCLUSION ||
1687 pool->vk.query_type == VK_QUERY_TYPE_TIMESTAMP)
1688 needed_flushes |= ANV_PIPE_CS_STALL_BIT;
1689
1690 if (needed_flushes) {
1691 anv_add_pending_pipe_bits(cmd_buffer,
1692 needed_flushes,
1693 "CopyQueryPoolResults");
1694 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
1695 }
1696
1697 struct mi_builder b;
1698 mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
1699 mi_builder_set_mocs(&b, anv_mocs_for_address(
1700 cmd_buffer->device,
1701 &(struct anv_address) { .bo = pool->bo }));
1702
1703 for (uint32_t i = 0; i < query_count; i++) {
1704 struct anv_address query_addr = anv_query_address(pool, first_query + i);
1705 struct mi_value result;
1706
1707 /* Wait for the availability write to land before we go read the data */
1708 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1709 anv_batch_emit(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT), sem) {
1710 sem.WaitMode = PollingMode;
1711 sem.CompareOperation = COMPARE_SAD_EQUAL_SDD;
1712 sem.SemaphoreDataDword = true;
1713 sem.SemaphoreAddress = query_addr;
1714 }
1715 }
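/* The MI_SEMAPHORE_WAIT above polls the first dword of the query slot (the
 * availability value) until it equals 1, implementing
 * VK_QUERY_RESULT_WAIT_BIT on the GPU instead of stalling on the CPU.
 */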
1716
1717 uint32_t idx = 0;
1718 switch (pool->vk.query_type) {
1719 case VK_QUERY_TYPE_OCCLUSION:
1720 case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
1721 #if GFX_VERx10 >= 125
1722 case VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT:
1723 #endif
1724 result = compute_query_result(&b, anv_address_add(query_addr, 8));
1725 /* Like in the case of vkGetQueryPoolResults, if the query is
1726 * unavailable and the VK_QUERY_RESULT_PARTIAL_BIT flag is set,
1727 * conservatively write 0 as the query result. If the
1728 * VK_QUERY_RESULT_PARTIAL_BIT isn't set, don't write any value.
1729 */
1730 gpu_write_query_result_cond(cmd_buffer, &b, query_addr, dest_addr,
1731 1 /* available */, flags, idx, result);
1732 if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
1733 gpu_write_query_result_cond(cmd_buffer, &b, query_addr, dest_addr,
1734 0 /* unavailable */, flags, idx, mi_imm(0));
1735 }
1736 idx++;
1737 break;
1738
1739 case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
1740 uint32_t statistics = pool->vk.pipeline_statistics;
1741 while (statistics) {
1742 UNUSED uint32_t stat = u_bit_scan(&statistics);
1743 result = compute_query_result(&b, anv_address_add(query_addr,
1744 idx * 16 + 8));
1745 gpu_write_query_result(&b, dest_addr, flags, idx++, result);
1746 }
1747 assert(idx == util_bitcount(pool->vk.pipeline_statistics));
1748 break;
1749 }
1750
1751 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1752 result = compute_query_result(&b, anv_address_add(query_addr, 8));
1753 gpu_write_query_result(&b, dest_addr, flags, idx++, result);
1754 result = compute_query_result(&b, anv_address_add(query_addr, 24));
1755 gpu_write_query_result(&b, dest_addr, flags, idx++, result);
1756 break;
1757
1758 case VK_QUERY_TYPE_TIMESTAMP:
1759 result = mi_mem64(anv_address_add(query_addr, 8));
1760 gpu_write_query_result(&b, dest_addr, flags, idx++, result);
1761 break;
1762
1763 #if GFX_VERx10 >= 125
1764 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
1765 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
1766 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR:
1767 result = mi_mem64(anv_address_add(query_addr, 8));
1768 gpu_write_query_result(&b, dest_addr, flags, idx++, result);
1769 break;
1770
1771 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR:
1772 result = mi_mem64(anv_address_add(query_addr, 16));
1773 gpu_write_query_result(&b, dest_addr, flags, idx++, result);
1774 break;
1775 #endif
1776
1777 case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR:
1778 unreachable("Copy KHR performance query results not implemented");
1779 break;
1780
1781 default:
1782 unreachable("unhandled query type");
1783 }
1784
1785 if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
1786 gpu_write_query_result(&b, dest_addr, flags, idx,
1787 mi_mem64(query_addr));
1788 }
1789
1790 dest_addr = anv_address_add(dest_addr, dest_stride);
1791 }
1792
1793 trace_intel_end_query_copy_cs(&cmd_buffer->trace, query_count);
1794 }
1795
1796 static void
1797 copy_query_results_with_shader(struct anv_cmd_buffer *cmd_buffer,
1798 struct anv_query_pool *pool,
1799 struct anv_address dest_addr,
1800 uint64_t dest_stride,
1801 uint32_t first_query,
1802 uint32_t query_count,
1803 VkQueryResultFlags flags)
1804 {
1805 struct anv_device *device = cmd_buffer->device;
1806 enum anv_pipe_bits needed_flushes = 0;
1807
1808 trace_intel_begin_query_copy_shader(&cmd_buffer->trace);
1809
1810 /* Ensure all query MI writes are visible to the shader */
1811 struct mi_builder b;
1812 mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
1813 mi_ensure_write_fence(&b);
1814
1815 /* If this is the first command in the batch buffer, make sure we have
1816 * a consistent pipeline mode.
1817 */
1818 if (cmd_buffer->state.current_pipeline == UINT32_MAX)
1819 genX(flush_pipeline_select_3d)(cmd_buffer);
1820
1821 if ((cmd_buffer->state.queries.buffer_write_bits |
1822 cmd_buffer->state.queries.clear_bits) & ANV_QUERY_WRITES_RT_FLUSH)
1823 needed_flushes |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
1824
1825 if ((cmd_buffer->state.queries.buffer_write_bits |
1826 cmd_buffer->state.queries.clear_bits) & ANV_QUERY_WRITES_DATA_FLUSH) {
1827 needed_flushes |= (ANV_PIPE_HDC_PIPELINE_FLUSH_BIT |
1828 ANV_PIPE_UNTYPED_DATAPORT_CACHE_FLUSH_BIT);
1829 }
1830
1831 /* Flushes for the queries to complete */
1832 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1833 /* Some queries are written by shaders, so we need them to flush their
1834 * higher-level cache writes. The L3 cache should be shared across the GPU.
1835 */
1836 if (pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR ||
1837 pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR ||
1838 pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR ||
1839 pool->vk.query_type == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR) {
1840 needed_flushes |= ANV_PIPE_UNTYPED_DATAPORT_CACHE_FLUSH_BIT;
1841 }
1842 /* And we need to stall for previous CS writes to land or the flushes to
1843 * complete.
1844 */
1845 needed_flushes |= ANV_PIPE_CS_STALL_BIT;
1846 }
1847
1848 /* Occlusion & timestamp queries are written using a PIPE_CONTROL, and
1849 * because we're about to copy their values using MI commands, we need to
1850 * stall the command streamer to make sure the PIPE_CONTROL writes have
1851 * landed; otherwise we could read inconsistent values & availability.
1852 *
1853 * From the vulkan spec:
1854 *
1855 * "vkCmdCopyQueryPoolResults is guaranteed to see the effect of
1856 * previous uses of vkCmdResetQueryPool in the same queue, without any
1857 * additional synchronization."
1858 */
1859 if (pool->vk.query_type == VK_QUERY_TYPE_OCCLUSION ||
1860 pool->vk.query_type == VK_QUERY_TYPE_TIMESTAMP)
1861 needed_flushes |= ANV_PIPE_CS_STALL_BIT;
1862
1863 if (needed_flushes) {
1864 anv_add_pending_pipe_bits(cmd_buffer,
1865 needed_flushes | ANV_PIPE_END_OF_PIPE_SYNC_BIT,
1866 "CopyQueryPoolResults");
1867 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
1868 }
1869
1870 struct anv_shader_bin *copy_kernel;
1871 VkResult ret =
1872 anv_device_get_internal_shader(
1873 cmd_buffer->device,
1874 cmd_buffer->state.current_pipeline == GPGPU ?
1875 ANV_INTERNAL_KERNEL_COPY_QUERY_RESULTS_COMPUTE :
1876 ANV_INTERNAL_KERNEL_COPY_QUERY_RESULTS_FRAGMENT,
1877 &copy_kernel);
1878 if (ret != VK_SUCCESS) {
1879 anv_batch_set_error(&cmd_buffer->batch, ret);
1880 return;
1881 }
1882
1883 struct anv_simple_shader state = {
1884 .device = cmd_buffer->device,
1885 .cmd_buffer = cmd_buffer,
1886 .dynamic_state_stream = &cmd_buffer->dynamic_state_stream,
1887 .general_state_stream = &cmd_buffer->general_state_stream,
1888 .batch = &cmd_buffer->batch,
1889 .kernel = copy_kernel,
1890 .l3_config = device->internal_kernels_l3_config,
1891 .urb_cfg = &cmd_buffer->state.gfx.urb_cfg,
1892 };
1893 genX(emit_simple_shader_init)(&state);
1894
1895 struct anv_state push_data_state =
1896 genX(simple_shader_alloc_push)(&state,
1897 sizeof(struct anv_query_copy_params));
1898 if (push_data_state.map == NULL)
1899 return;
1900
1901 struct anv_query_copy_params *params = push_data_state.map;
1902
1903 uint32_t copy_flags =
1904 ((flags & VK_QUERY_RESULT_64_BIT) ? ANV_COPY_QUERY_FLAG_RESULT64 : 0) |
1905 ((flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) ? ANV_COPY_QUERY_FLAG_AVAILABLE : 0);
1906
1907 uint32_t num_items = 1;
1908 uint32_t data_offset = 8 /* behind availability */;
1909 switch (pool->vk.query_type) {
1910 case VK_QUERY_TYPE_OCCLUSION:
1911 copy_flags |= ANV_COPY_QUERY_FLAG_DELTA;
1912 /* Occlusion and timestamp queries are the only ones that can have partial
1913 * data, because they are captured with a PIPE_CONTROL post-sync operation.
1914 * The other ones are captured with MI_STORE_REGISTER_MEM, so their data is
1915 * always available by the time we reach the copy command.
1916 */
1917 copy_flags |= (flags & VK_QUERY_RESULT_PARTIAL_BIT) ? ANV_COPY_QUERY_FLAG_PARTIAL : 0;
1918 break;
1919
1920 case VK_QUERY_TYPE_TIMESTAMP:
1921 copy_flags |= (flags & VK_QUERY_RESULT_PARTIAL_BIT) ? ANV_COPY_QUERY_FLAG_PARTIAL : 0;
1922 break;
1923
1924 case VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT:
1925 #if GFX_VERx10 >= 125
1926 case VK_QUERY_TYPE_MESH_PRIMITIVES_GENERATED_EXT:
1927 #endif
1928 copy_flags |= ANV_COPY_QUERY_FLAG_DELTA;
1929 break;
1930
1931 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1932 num_items = util_bitcount(pool->vk.pipeline_statistics);
1933 copy_flags |= ANV_COPY_QUERY_FLAG_DELTA;
1934 break;
1935
1936 case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
1937 num_items = 2;
1938 copy_flags |= ANV_COPY_QUERY_FLAG_DELTA;
1939 break;
1940
1941 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
1942 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
1943 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR:
1944 break;
1945
1946 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR:
1947 data_offset += 8;
1948 break;
1949
1950 default:
1951 unreachable("unhandled query type");
1952 }
1953
1954 *params = (struct anv_query_copy_params) {
1955 .flags = copy_flags,
1956 .num_queries = query_count,
1957 .num_items = num_items,
1958 .query_base = first_query,
1959 .query_stride = pool->stride,
1960 .query_data_offset = data_offset,
1961 .destination_stride = dest_stride,
1962 .query_data_addr = anv_address_physical(
1963 (struct anv_address) {
1964 .bo = pool->bo,
1965 }),
1966 .destination_addr = anv_address_physical(dest_addr),
1967 };
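/* Rough sketch of what the internal copy kernel does with these parameters
 * (the authoritative behavior lives in the internal kernel sources); for
 * each query q in [0, num_queries):
 *
 *    src = query_data_addr + (query_base + q) * query_stride;
 *    dst = destination_addr + q * destination_stride;
 *    for (i = 0; i < num_items; i++)
 *       write value i read from src + query_data_offset (as end - begin if
 *       ANV_COPY_QUERY_FLAG_DELTA is set) to dst;
 *    if (ANV_COPY_QUERY_FLAG_AVAILABLE)
 *       append the availability value;
 *
 * using 32- or 64-bit writes depending on ANV_COPY_QUERY_FLAG_RESULT64.
 */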
1968
1969 genX(emit_simple_shader_dispatch)(&state, query_count, push_data_state);
1970
1971 /* The query copy shader writes its results through the dataport, so flush
1972 * the HDC/data cache depending on the generation. Also stall at the pixel
1973 * scoreboard in case we're doing the copy with a fragment shader.
1974 */
1975 cmd_buffer->state.queries.buffer_write_bits |= ANV_QUERY_WRITES_DATA_FLUSH;
1976
1977 trace_intel_end_query_copy_shader(&cmd_buffer->trace, query_count);
1978 }
1979
1980 void genX(CmdCopyQueryPoolResults)(
1981 VkCommandBuffer commandBuffer,
1982 VkQueryPool queryPool,
1983 uint32_t firstQuery,
1984 uint32_t queryCount,
1985 VkBuffer destBuffer,
1986 VkDeviceSize destOffset,
1987 VkDeviceSize destStride,
1988 VkQueryResultFlags flags)
1989 {
1990 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1991 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1992 ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
1993 struct anv_device *device = cmd_buffer->device;
1994 struct anv_physical_device *pdevice = device->physical;
1995
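/* Pick a copy strategy: above the instance's
 * query_copy_with_shader_threshold we dispatch a small internal shader,
 * which should scale better for large copies; otherwise we copy with MI
 * commands directly from the command streamer, which has lower setup
 * overhead for a handful of queries.
 */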
1996 if (queryCount > pdevice->instance->query_copy_with_shader_threshold) {
1997 copy_query_results_with_shader(cmd_buffer, pool,
1998 anv_address_add(buffer->address,
1999 destOffset),
2000 destStride,
2001 firstQuery,
2002 queryCount,
2003 flags);
2004 } else {
2005 copy_query_results_with_cs(cmd_buffer, pool,
2006 anv_address_add(buffer->address,
2007 destOffset),
2008 destStride,
2009 firstQuery,
2010 queryCount,
2011 flags);
2012 }
2013 }
2014
2015 #if GFX_VERx10 >= 125 && ANV_SUPPORT_RT
2016
2017 #include "grl/include/GRLRTASCommon.h"
2018 #include "grl/grl_metakernel_postbuild_info.h"
2019
2020 void
2021 genX(CmdWriteAccelerationStructuresPropertiesKHR)(
2022 VkCommandBuffer commandBuffer,
2023 uint32_t accelerationStructureCount,
2024 const VkAccelerationStructureKHR* pAccelerationStructures,
2025 VkQueryType queryType,
2026 VkQueryPool queryPool,
2027 uint32_t firstQuery)
2028 {
2029 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
2030 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
2031
2032 assert(queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR ||
2033 queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR ||
2034 queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR ||
2035 queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR);
2036
2037 emit_query_clear_flush(cmd_buffer, pool,
2038 "CmdWriteAccelerationStructuresPropertiesKHR flush query clears");
2039
2040 struct mi_builder b;
2041 mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
2042
2043 for (uint32_t i = 0; i < accelerationStructureCount; i++) {
2044 ANV_FROM_HANDLE(vk_acceleration_structure, accel, pAccelerationStructures[i]);
2045 struct anv_address query_addr =
2046 anv_address_add(anv_query_address(pool, firstQuery + i), 8);
2047
2048 switch (queryType) {
2049 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR:
2050 genX(grl_postbuild_info_compacted_size)(cmd_buffer,
2051 vk_acceleration_structure_get_va(accel),
2052 anv_address_physical(query_addr));
2053 break;
2054
2055 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SIZE_KHR:
2056 genX(grl_postbuild_info_current_size)(cmd_buffer,
2057 vk_acceleration_structure_get_va(accel),
2058 anv_address_physical(query_addr));
2059 break;
2060
2061 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
2062 case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_BOTTOM_LEVEL_POINTERS_KHR:
2063 genX(grl_postbuild_info_serialized_size)(cmd_buffer,
2064 vk_acceleration_structure_get_va(accel),
2065 anv_address_physical(query_addr));
2066 break;
2067
2068 default:
2069 unreachable("unhandled query type");
2070 }
2071 }
2072
2073 /* TODO: Figure out why MTL needs ANV_PIPE_DATA_CACHE_FLUSH_BIT in order
2074 * to not lose the availability bit.
2075 */
2076 anv_add_pending_pipe_bits(cmd_buffer,
2077 ANV_PIPE_END_OF_PIPE_SYNC_BIT |
2078 ANV_PIPE_DATA_CACHE_FLUSH_BIT,
2079 "after write acceleration struct props");
2080 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
2081
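/* Only now mark the queries as available: the end-of-pipe sync above
 * guarantees the GRL postbuild-info writes have landed before the
 * availability values become visible.
 */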
2082 for (uint32_t i = 0; i < accelerationStructureCount; i++)
2083 emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), true);
2084 }
2085 #endif
2086