xref: /aosp_15_r20/external/mesa3d/src/gallium/drivers/iris/iris_query.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2017 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included
12  * in all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20  * DEALINGS IN THE SOFTWARE.
21  */
22 
23 /**
24  * @file iris_query.c
25  *
26  * ============================= GENXML CODE =============================
27  *              [This file is compiled once per generation.]
28  * =======================================================================
29  *
30  * Query object support.  This allows measuring various simple statistics
31  * via counters on the GPU.  We use GenX code for MI_MATH calculations.
32  */
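/*
 * For reference, a Gallium frontend drives these hooks roughly as follows
 * (an illustrative sketch only, not part of this driver; it uses the
 * standard pipe_context entry points installed by genX(init_query) below):
 *
 *    struct pipe_query *q =
 *       ctx->create_query(ctx, PIPE_QUERY_PRIMITIVES_GENERATED, 0);
 *    ctx->begin_query(ctx, q);
 *    ... draw calls ...
 *    ctx->end_query(ctx, q);
 *
 *    union pipe_query_result result;
 *    ctx->get_query_result(ctx, q, true, &result);  (true == wait)
 *    ctx->destroy_query(ctx, q);
 */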
33 
34 #include <stdio.h>
35 #include <errno.h>
36 #include "pipe/p_defines.h"
37 #include "pipe/p_state.h"
38 #include "pipe/p_context.h"
39 #include "pipe/p_screen.h"
40 #include "util/u_inlines.h"
41 #include "util/u_upload_mgr.h"
42 #include "iris_context.h"
43 #include "iris_defines.h"
44 #include "iris_fence.h"
45 #include "iris_monitor.h"
46 #include "iris_resource.h"
47 #include "iris_screen.h"
48 
49 #include "iris_genx_macros.h"
50 
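/* The per-stream SO_PRIM_STORAGE_NEEDED and SO_NUM_PRIMS_WRITTEN counters
 * are consecutive 64-bit registers, so stream <n>'s register lives 8 bytes
 * per stream past stream 0's.
 */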
51 #define SO_PRIM_STORAGE_NEEDED(n) (GENX(SO_PRIM_STORAGE_NEEDED0_num) + (n) * 8)
52 #define SO_NUM_PRIMS_WRITTEN(n)   (GENX(SO_NUM_PRIMS_WRITTEN0_num) + (n) * 8)
53 
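/**
 * Driver-private query object.  It embeds struct threaded_query first so
 * that u_threaded_context can treat it as one of its own queries.
 */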
54 struct iris_query {
55    struct threaded_query b;
56 
57    enum pipe_query_type type;
58    int index;
59 
60    bool ready;
61 
62    bool stalled;
63 
64    uint64_t result;
65 
66    struct iris_state_ref query_state_ref;
67    struct iris_query_snapshots *map;
68    struct iris_syncobj *syncobj;
69 
70    int batch_idx;
71 
72    struct iris_monitor_object *monitor;
73 
74    /* Fence for PIPE_QUERY_GPU_FINISHED. */
75    struct pipe_fence_handle *fence;
76 };
77 
78 struct iris_query_snapshots {
79    /** iris_render_condition's saved MI_PREDICATE_RESULT value. */
80    uint64_t predicate_result;
81 
82    /** Have the start/end snapshots landed? */
83    uint64_t snapshots_landed;
84 
85    /** Starting and ending counter snapshots */
86    uint64_t start;
87    uint64_t end;
88 };
89 
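/**
 * Snapshot layout for SO overflow queries.  The leading predicate_result
 * and snapshots_landed fields must match struct iris_query_snapshots,
 * since generic code accesses them through q->map for either layout.
 */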
90 struct iris_query_so_overflow {
91    uint64_t predicate_result;
92    uint64_t snapshots_landed;
93 
94    struct {
95       uint64_t prim_storage_needed[2];
96       uint64_t num_prims[2];
97    } stream[4];
98 };
99 
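/**
 * Build an mi_value referring to a 64-bit slot at the given offset within
 * this query's snapshot buffer.
 */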
100 static struct mi_value
101 query_mem64(struct iris_query *q, uint32_t offset)
102 {
103    struct iris_address addr = {
104       .bo = iris_resource_bo(q->query_state_ref.res),
105       .offset = q->query_state_ref.offset + offset,
106       .access = IRIS_DOMAIN_OTHER_WRITE
107    };
108    return mi_mem64(addr);
109 }
110 
111 /**
112  * Is this type of query written by PIPE_CONTROL?
113  */
114 static bool
115 iris_is_query_pipelined(struct iris_query *q)
116 {
117    switch (q->type) {
118    case PIPE_QUERY_OCCLUSION_COUNTER:
119    case PIPE_QUERY_OCCLUSION_PREDICATE:
120    case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
121    case PIPE_QUERY_TIMESTAMP:
122    case PIPE_QUERY_TIMESTAMP_DISJOINT:
123    case PIPE_QUERY_TIME_ELAPSED:
124       return true;
125 
126    default:
127       return false;
128    }
129 }
130 
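/**
 * Mark the query results as available by writing a non-zero value to
 * snapshots_landed.  Pipelined queries use a PIPE_CONTROL post-sync write
 * with FLUSH_ENABLE so the availability write lands after the snapshots.
 */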
131 static void
132 mark_available(struct iris_context *ice, struct iris_query *q)
133 {
134    struct iris_batch *batch = &ice->batches[q->batch_idx];
135    unsigned flags = PIPE_CONTROL_WRITE_IMMEDIATE;
136    unsigned offset = offsetof(struct iris_query_snapshots, snapshots_landed);
137    struct iris_bo *bo = iris_resource_bo(q->query_state_ref.res);
138    offset += q->query_state_ref.offset;
139 
140    if (!iris_is_query_pipelined(q)) {
141       batch->screen->vtbl.store_data_imm64(batch, bo, offset, true);
142    } else {
143       /* Order available *after* the query results. */
144       flags |= PIPE_CONTROL_FLUSH_ENABLE;
145       iris_emit_pipe_control_write(batch, "query: mark available",
146                                    flags, bo, offset, true);
147    }
148 }
149 
150 /**
151  * Write a pipelined snapshot (PS_DEPTH_COUNT or TIMESTAMP) via a PIPE_CONTROL.
152  */
153 static void
154 iris_pipelined_write(struct iris_batch *batch,
155                      struct iris_query *q,
156                      enum pipe_control_flags flags,
157                      unsigned offset)
158 {
159    const struct intel_device_info *devinfo = batch->screen->devinfo;
160    const unsigned optional_cs_stall =
161       GFX_VER == 9 && devinfo->gt == 4 ?  PIPE_CONTROL_CS_STALL : 0;
162    struct iris_bo *bo = iris_resource_bo(q->query_state_ref.res);
163 
164    iris_emit_pipe_control_write(batch, "query: pipelined snapshot write",
165                                 flags | optional_cs_stall,
166                                 bo, offset, 0ull);
167 }
168 
169 static void
170 write_value(struct iris_context *ice, struct iris_query *q, unsigned offset)
171 {
172    struct iris_batch *batch = &ice->batches[q->batch_idx];
173    struct iris_bo *bo = iris_resource_bo(q->query_state_ref.res);
174 
175    if (!iris_is_query_pipelined(q)) {
176       enum pipe_control_flags flags = PIPE_CONTROL_CS_STALL |
177                                       PIPE_CONTROL_STALL_AT_SCOREBOARD;
178       if (batch->name == IRIS_BATCH_COMPUTE) {
179          iris_emit_pipe_control_write(batch,
180                                       "query: write immediate for compute batches",
181                                       PIPE_CONTROL_WRITE_IMMEDIATE,
182                                       bo,
183                                       offset,
184                                       0ull);
185          flags = PIPE_CONTROL_FLUSH_ENABLE;
186       }
187 
188       iris_emit_pipe_control_flush(batch,
189                                    "query: non-pipelined snapshot write",
190                                    flags);
191       q->stalled = true;
192    }
193 
194    switch (q->type) {
195    case PIPE_QUERY_OCCLUSION_COUNTER:
196    case PIPE_QUERY_OCCLUSION_PREDICATE:
197    case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
198       if (GFX_VER >= 10) {
199          /* "Driver must program PIPE_CONTROL with only Depth Stall Enable
200           *  bit set prior to programming a PIPE_CONTROL with Write PS Depth
201           *  Count sync operation."
202           */
203          iris_emit_pipe_control_flush(batch,
204                                       "workaround: depth stall before writing "
205                                       "PS_DEPTH_COUNT",
206                                       PIPE_CONTROL_DEPTH_STALL);
207       }
208       iris_pipelined_write(&ice->batches[IRIS_BATCH_RENDER], q,
209                            PIPE_CONTROL_WRITE_DEPTH_COUNT |
210                            PIPE_CONTROL_DEPTH_STALL,
211                            offset);
212       break;
213    case PIPE_QUERY_TIME_ELAPSED:
214    case PIPE_QUERY_TIMESTAMP:
215    case PIPE_QUERY_TIMESTAMP_DISJOINT:
216       iris_pipelined_write(&ice->batches[IRIS_BATCH_RENDER], q,
217                            PIPE_CONTROL_WRITE_TIMESTAMP,
218                            offset);
219       break;
220    case PIPE_QUERY_PRIMITIVES_GENERATED:
221       batch->screen->vtbl.store_register_mem64(batch,
222                                      q->index == 0 ?
223                                      GENX(CL_INVOCATION_COUNT_num) :
224                                      SO_PRIM_STORAGE_NEEDED(q->index),
225                                      bo, offset, false);
226       break;
227    case PIPE_QUERY_PRIMITIVES_EMITTED:
228       batch->screen->vtbl.store_register_mem64(batch,
229                                      SO_NUM_PRIMS_WRITTEN(q->index),
230                                      bo, offset, false);
231       break;
232    case PIPE_QUERY_PIPELINE_STATISTICS_SINGLE: {
233       static const uint32_t index_to_reg[] = {
234          GENX(IA_VERTICES_COUNT_num),
235          GENX(IA_PRIMITIVES_COUNT_num),
236          GENX(VS_INVOCATION_COUNT_num),
237          GENX(GS_INVOCATION_COUNT_num),
238          GENX(GS_PRIMITIVES_COUNT_num),
239          GENX(CL_INVOCATION_COUNT_num),
240          GENX(CL_PRIMITIVES_COUNT_num),
241          GENX(PS_INVOCATION_COUNT_num),
242          GENX(HS_INVOCATION_COUNT_num),
243          GENX(DS_INVOCATION_COUNT_num),
244          GENX(CS_INVOCATION_COUNT_num),
245       };
246       const uint32_t reg = index_to_reg[q->index];
247 
248       batch->screen->vtbl.store_register_mem64(batch, reg, bo, offset, false);
249       break;
250    }
251    default:
252       assert(false);
253    }
254 }
255 
256 static void
257 write_overflow_values(struct iris_context *ice, struct iris_query *q, bool end)
258 {
259    struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
260    uint32_t count = q->type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ? 1 : 4;
261    struct iris_bo *bo = iris_resource_bo(q->query_state_ref.res);
262    uint32_t offset = q->query_state_ref.offset;
263 
264    iris_emit_pipe_control_flush(batch,
265                                 "query: write SO overflow snapshots",
266                                 PIPE_CONTROL_CS_STALL |
267                                 PIPE_CONTROL_STALL_AT_SCOREBOARD);
268    for (uint32_t i = 0; i < count; i++) {
269       int s = q->index + i;
270       int g_idx = offset + offsetof(struct iris_query_so_overflow,
271                            stream[s].num_prims[end]);
272       int w_idx = offset + offsetof(struct iris_query_so_overflow,
273                            stream[s].prim_storage_needed[end]);
274       batch->screen->vtbl.store_register_mem64(batch, SO_NUM_PRIMS_WRITTEN(s),
275                                      bo, g_idx, false);
276       batch->screen->vtbl.store_register_mem64(batch, SO_PRIM_STORAGE_NEEDED(s),
277                                      bo, w_idx, false);
278    }
279 }
280 
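/**
 * Compute the delta between two raw timestamps, accounting for a single
 * wrap of the 36-bit hardware timestamp counter.
 */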
281 static uint64_t
282 iris_raw_timestamp_delta(uint64_t time0, uint64_t time1)
283 {
284    if (time0 > time1) {
285       return (1ull << 36) + time1 - time0;
286    } else {
287       return time1 - time0;
288    }
289 }
290 
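/**
 * A stream overflowed if the primitives needing storage advanced by a
 * different amount than the primitives actually written during the query.
 */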
291 static bool
292 stream_overflowed(struct iris_query_so_overflow *so, int s)
293 {
294    return (so->stream[s].prim_storage_needed[1] -
295            so->stream[s].prim_storage_needed[0]) !=
296           (so->stream[s].num_prims[1] - so->stream[s].num_prims[0]);
297 }
298 
299 static void
300 calculate_result_on_cpu(const struct intel_device_info *devinfo,
301                         struct iris_query *q)
302 {
303    switch (q->type) {
304    case PIPE_QUERY_OCCLUSION_PREDICATE:
305    case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
306       q->result = q->map->end != q->map->start;
307       break;
308    case PIPE_QUERY_TIMESTAMP:
309    case PIPE_QUERY_TIMESTAMP_DISJOINT:
310       /* The timestamp is the single starting snapshot. */
311       q->result = intel_device_info_timebase_scale(devinfo, q->map->start);
312       break;
313    case PIPE_QUERY_TIME_ELAPSED:
314       q->result = iris_raw_timestamp_delta(q->map->start, q->map->end);
315       q->result = intel_device_info_timebase_scale(devinfo, q->result);
316       break;
317    case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
318       q->result = stream_overflowed((void *) q->map, q->index);
319       break;
320    case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
321       q->result = false;
322       for (int i = 0; i < PIPE_MAX_VERTEX_STREAMS; i++)
323          q->result |= stream_overflowed((void *) q->map, i);
324       break;
325    case PIPE_QUERY_PIPELINE_STATISTICS_SINGLE:
326       q->result = q->map->end - q->map->start;
327 
328       /* WaDividePSInvocationCountBy4:HSW,BDW */
329       if (GFX_VER == 8 && q->index == PIPE_STAT_QUERY_PS_INVOCATIONS)
330          q->result /= 4;
331       break;
332    case PIPE_QUERY_OCCLUSION_COUNTER:
333    case PIPE_QUERY_PRIMITIVES_GENERATED:
334    case PIPE_QUERY_PRIMITIVES_EMITTED:
335    default:
336       q->result = q->map->end - q->map->start;
337       break;
338    }
339 
340    q->ready = true;
341 }
342 
343 /**
344  * Calculate the streamout overflow for stream \p idx:
345  *
346  * (num_prims[1] - num_prims[0]) - (storage_needed[1] - storage_needed[0])
347  */
348 static struct mi_value
349 calc_overflow_for_stream(struct mi_builder *b,
350                          struct iris_query *q,
351                          int idx)
352 {
353 #define C(counter, i) query_mem64(q, \
354    offsetof(struct iris_query_so_overflow, stream[idx].counter[i]))
355 
356    return mi_isub(b, mi_isub(b, C(num_prims, 1), C(num_prims, 0)),
357                      mi_isub(b, C(prim_storage_needed, 1),
358                                 C(prim_storage_needed, 0)));
359 #undef C
360 }
361 
362 /**
363  * Calculate whether any stream has overflowed.
364  */
365 static struct mi_value
366 calc_overflow_any_stream(struct mi_builder *b, struct iris_query *q)
367 {
368    struct mi_value stream_result[PIPE_MAX_VERTEX_STREAMS];
369    for (int i = 0; i < PIPE_MAX_VERTEX_STREAMS; i++)
370       stream_result[i] = calc_overflow_for_stream(b, q, i);
371 
372    struct mi_value result = stream_result[0];
373    for (int i = 1; i < PIPE_MAX_VERTEX_STREAMS; i++)
374       result = mi_ior(b, result, stream_result[i]);
375 
376    return result;
377 }
378 
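/**
 * Does this query type produce a boolean (0/1) result rather than a
 * counter value?
 */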
379 static bool
380 query_is_boolean(enum pipe_query_type type)
381 {
382    switch (type) {
383    case PIPE_QUERY_OCCLUSION_PREDICATE:
384    case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
385    case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
386    case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
387       return true;
388    default:
389       return false;
390    }
391 }
392 
393 /**
394  * Calculate the result using MI_MATH.
395  */
396 static struct mi_value
397 calculate_result_on_gpu(const struct intel_device_info *devinfo,
398                         struct mi_builder *b,
399                         struct iris_query *q)
400 {
401    struct mi_value result;
402    struct mi_value start_val =
403       query_mem64(q, offsetof(struct iris_query_snapshots, start));
404    struct mi_value end_val =
405       query_mem64(q, offsetof(struct iris_query_snapshots, end));
406 
407    switch (q->type) {
408    case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
409       result = calc_overflow_for_stream(b, q, q->index);
410       break;
411    case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
412       result = calc_overflow_any_stream(b, q);
413       break;
414    case PIPE_QUERY_TIMESTAMP: {
415       /* TODO: This discards any fractional bits of the timebase scale.
416        * We would need to do a bit of fixed point math on the CS ALU, or
417        * launch an actual shader to calculate this with full precision.
418        */
419       uint32_t scale = 1000000000ull / devinfo->timestamp_frequency;
420       result = mi_iand(b, mi_imm((1ull << 36) - 1),
421                           mi_imul_imm(b, start_val, scale));
422       break;
423    }
424    case PIPE_QUERY_TIME_ELAPSED: {
425       /* TODO: This discards fractional bits (see above). */
426       uint32_t scale = 1000000000ull / devinfo->timestamp_frequency;
427       result = mi_imul_imm(b, mi_isub(b, end_val, start_val), scale);
428       break;
429    }
430    default:
431       result = mi_isub(b, end_val, start_val);
432       break;
433    }
434 
435    /* WaDividePSInvocationCountBy4:HSW,BDW */
436    if (GFX_VER == 8 &&
437        q->type == PIPE_QUERY_PIPELINE_STATISTICS_SINGLE &&
438        q->index == PIPE_STAT_QUERY_PS_INVOCATIONS)
439       result = mi_ushr32_imm(b, result, 2);
440 
441    if (query_is_boolean(q->type))
442       result = mi_iand(b, mi_nz(b, result), mi_imm(1));
443 
444    return result;
445 }
446 
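/**
 * Create a basic query object.  CS invocation statistics are measured on
 * the compute batch; every other query type uses the render batch.
 */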
447 static struct pipe_query *
448 iris_create_query(struct pipe_context *ctx,
449                   unsigned query_type,
450                   unsigned index)
451 {
452    struct iris_query *q = calloc(1, sizeof(struct iris_query));
453 
454    q->type = query_type;
455    q->index = index;
456    q->monitor = NULL;
457 
458    if (q->type == PIPE_QUERY_PIPELINE_STATISTICS_SINGLE &&
459        q->index == PIPE_STAT_QUERY_CS_INVOCATIONS)
460       q->batch_idx = IRIS_BATCH_COMPUTE;
461    else
462       q->batch_idx = IRIS_BATCH_RENDER;
463    return (struct pipe_query *) q;
464 }
465 
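/**
 * Create a driver-specific "batch query" that samples a group of
 * performance-monitor counters (see iris_monitor.c).
 */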
466 static struct pipe_query *
467 iris_create_batch_query(struct pipe_context *ctx,
468                         unsigned num_queries,
469                         unsigned *query_types)
470 {
471    struct iris_context *ice = (void *) ctx;
472    struct iris_query *q = calloc(1, sizeof(struct iris_query));
473    if (unlikely(!q))
474       return NULL;
475    q->type = PIPE_QUERY_DRIVER_SPECIFIC;
476    q->index = -1;
477    q->monitor = iris_create_monitor_object(ice, num_queries, query_types);
478    if (unlikely(!q->monitor)) {
479       free(q);
480       return NULL;
481    }
482 
483    return (struct pipe_query *) q;
484 }
485 
486 static void
487 iris_destroy_query(struct pipe_context *ctx, struct pipe_query *p_query)
488 {
489    struct iris_query *query = (void *) p_query;
490    struct iris_screen *screen = (void *) ctx->screen;
491    if (query->monitor) {
492       iris_destroy_monitor_object(ctx, query->monitor);
493       query->monitor = NULL;
494    } else {
495       iris_syncobj_reference(screen->bufmgr, &query->syncobj, NULL);
496       screen->base.fence_reference(ctx->screen, &query->fence, NULL);
497    }
498    pipe_resource_reference(&query->query_state_ref.res, NULL);
499    free(query);
500 }
501 
502 
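/**
 * Allocate snapshot space from the query uploader, reset the query state,
 * and write the starting snapshot(s).
 */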
503 static bool
504 iris_begin_query(struct pipe_context *ctx, struct pipe_query *query)
505 {
506    struct iris_context *ice = (void *) ctx;
507    struct iris_query *q = (void *) query;
508 
509    if (q->monitor)
510       return iris_begin_monitor(ctx, q->monitor);
511 
512    void *ptr = NULL;
513    uint32_t size;
514 
515    if (q->type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
516        q->type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
517       size = sizeof(struct iris_query_so_overflow);
518    else
519       size = sizeof(struct iris_query_snapshots);
520 
521    u_upload_alloc(ice->query_buffer_uploader, 0,
522                   size, util_next_power_of_two(size),
523                   &q->query_state_ref.offset,
524                   &q->query_state_ref.res, &ptr);
525 
526    if (!iris_resource_bo(q->query_state_ref.res))
527       return false;
528 
529    q->map = ptr;
530    if (!q->map)
531       return false;
532 
533    q->result = 0ull;
534    q->ready = false;
535    WRITE_ONCE(q->map->snapshots_landed, false);
536 
537    if (q->type == PIPE_QUERY_PRIMITIVES_GENERATED && q->index == 0) {
538       ice->state.prims_generated_query_active = true;
539       ice->state.dirty |= IRIS_DIRTY_STREAMOUT | IRIS_DIRTY_CLIP;
540    }
541 
542    if (q->type == PIPE_QUERY_OCCLUSION_COUNTER && q->index == 0) {
543       ice->state.occlusion_query_active = true;
544       ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
545    }
546 
547    if (q->type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
548        q->type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
549       write_overflow_values(ice, q, false);
550    else
551       write_value(ice, q,
552                   q->query_state_ref.offset +
553                   offsetof(struct iris_query_snapshots, start));
554 
555    return true;
556 }
557 
558 static bool
559 iris_end_query(struct pipe_context *ctx, struct pipe_query *query)
560 {
561    struct iris_context *ice = (void *) ctx;
562    struct iris_query *q = (void *) query;
563 
564    if (q->monitor)
565       return iris_end_monitor(ctx, q->monitor);
566 
567    if (q->type == PIPE_QUERY_GPU_FINISHED) {
568       ctx->flush(ctx, &q->fence, PIPE_FLUSH_DEFERRED);
569       return true;
570    }
571 
572    struct iris_batch *batch = &ice->batches[q->batch_idx];
573 
574    if (q->type == PIPE_QUERY_TIMESTAMP) {
575       iris_begin_query(ctx, query);
576       iris_batch_reference_signal_syncobj(batch, &q->syncobj);
577       mark_available(ice, q);
578       return true;
579    }
580 
581    if (q->type == PIPE_QUERY_PRIMITIVES_GENERATED && q->index == 0) {
582       ice->state.prims_generated_query_active = false;
583       ice->state.dirty |= IRIS_DIRTY_STREAMOUT | IRIS_DIRTY_CLIP;
584    }
585 
586    if (q->type == PIPE_QUERY_OCCLUSION_COUNTER && q->index == 0) {
587       ice->state.occlusion_query_active = false;
588       ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
589    }
590 
591    if (q->type == PIPE_QUERY_SO_OVERFLOW_PREDICATE ||
592        q->type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE)
593       write_overflow_values(ice, q, true);
594    else
595       write_value(ice, q,
596                   q->query_state_ref.offset +
597                   offsetof(struct iris_query_snapshots, end));
598 
599    iris_batch_reference_signal_syncobj(batch, &q->syncobj);
600    mark_available(ice, q);
601 
602    return true;
603 }
604 
605 /**
606  * See if the snapshots have landed for a query, and if so, compute the
607  * result and mark it ready.  Does not flush (unlike iris_get_query_result).
608  */
609 static void
610 iris_check_query_no_flush(struct iris_context *ice, struct iris_query *q)
611 {
612    struct iris_screen *screen = (void *) ice->ctx.screen;
613    const struct intel_device_info *devinfo = screen->devinfo;
614 
615    if (!q->ready && READ_ONCE(q->map->snapshots_landed)) {
616       calculate_result_on_cpu(devinfo, q);
617    }
618 }
619 
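/**
 * Read back a query result on the CPU.  If the snapshots haven't landed,
 * this may flush the batch and, when \p wait is set, block on the query's
 * syncobj before computing the result.
 */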
620 static bool
621 iris_get_query_result(struct pipe_context *ctx,
622                       struct pipe_query *query,
623                       bool wait,
624                       union pipe_query_result *result)
625 {
626    struct iris_context *ice = (void *) ctx;
627    struct iris_query *q = (void *) query;
628 
629    if (q->monitor)
630       return iris_get_monitor_result(ctx, q->monitor, wait, result->batch);
631 
632    struct iris_screen *screen = (void *) ctx->screen;
633    const struct intel_device_info *devinfo = screen->devinfo;
634 
635    if (unlikely(screen->devinfo->no_hw)) {
636       result->u64 = 0;
637       return true;
638    }
639 
640    if (q->type == PIPE_QUERY_GPU_FINISHED) {
641       struct pipe_screen *screen = ctx->screen;
642 
643       result->b = screen->fence_finish(screen, ctx, q->fence,
644                                        wait ? OS_TIMEOUT_INFINITE : 0);
645       return result->b;
646    }
647 
648    if (!q->ready) {
649       struct iris_batch *batch = &ice->batches[q->batch_idx];
650       if (q->syncobj == iris_batch_get_signal_syncobj(batch))
651          iris_batch_flush(batch);
652 
653       while (!READ_ONCE(q->map->snapshots_landed)) {
654          if (wait)
655             iris_wait_syncobj(screen->bufmgr, q->syncobj, INT64_MAX);
656          else
657             return false;
658       }
659 
660       assert(READ_ONCE(q->map->snapshots_landed));
661       calculate_result_on_cpu(devinfo, q);
662    }
663 
664    assert(q->ready);
665 
666    result->u64 = q->result;
667 
668    return true;
669 }
670 
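/**
 * Write a query result (or, for index == -1, its availability) into a
 * buffer object.  Results already known on the CPU are stored directly;
 * otherwise they are computed on the GPU with MI_MATH, optionally
 * predicated on snapshots_landed.
 */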
671 static void
672 iris_get_query_result_resource(struct pipe_context *ctx,
673                                struct pipe_query *query,
674                                enum pipe_query_flags flags,
675                                enum pipe_query_value_type result_type,
676                                int index,
677                                struct pipe_resource *p_res,
678                                unsigned offset)
679 {
680    struct iris_context *ice = (void *) ctx;
681    struct iris_query *q = (void *) query;
682    struct iris_batch *batch = &ice->batches[q->batch_idx];
683    const struct intel_device_info *devinfo = batch->screen->devinfo;
684    struct iris_resource *res = (void *) p_res;
685    struct iris_bo *query_bo = iris_resource_bo(q->query_state_ref.res);
686    struct iris_bo *dst_bo = iris_resource_bo(p_res);
687    unsigned snapshots_landed_offset =
688       offsetof(struct iris_query_snapshots, snapshots_landed);
689 
690    res->bind_history |= PIPE_BIND_QUERY_BUFFER;
691 
692    if (index == -1) {
693       /* They're asking for the availability of the result.  If we still
694        * have commands queued up which produce the result, submit them
695        * now so that progress happens.  Either way, copy the snapshots
696        * landed field to the destination resource.
697        */
698       if (q->syncobj == iris_batch_get_signal_syncobj(batch))
699          iris_batch_flush(batch);
700 
701       batch->screen->vtbl.copy_mem_mem(batch, dst_bo, offset,
702                              query_bo, snapshots_landed_offset,
703                              result_type <= PIPE_QUERY_TYPE_U32 ? 4 : 8);
704       return;
705    }
706 
707    if (!q->ready && READ_ONCE(q->map->snapshots_landed)) {
708       /* The final snapshots happen to have landed, so let's just compute
709        * the result on the CPU now...
710        */
711       calculate_result_on_cpu(devinfo, q);
712    }
713 
714    if (q->ready) {
715       /* We happen to have the result on the CPU, so just copy it. */
716       if (result_type <= PIPE_QUERY_TYPE_U32) {
717          batch->screen->vtbl.store_data_imm32(batch, dst_bo, offset, q->result);
718       } else {
719          batch->screen->vtbl.store_data_imm64(batch, dst_bo, offset, q->result);
720       }
721 
722       /* Make sure QBO is flushed before its result is used elsewhere. */
723       iris_dirty_for_history(ice, res);
724       return;
725    }
726 
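   /* The caller didn't ask us to wait and we haven't already stalled, so
    * the snapshots may not have landed yet.  Predicate the store below on
    * snapshots_landed so the destination is only written once the result
    * is actually valid.
    */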
727    bool predicated = !(flags & PIPE_QUERY_WAIT) && !q->stalled;
728 
729    struct mi_builder b;
730    mi_builder_init(&b, batch->screen->devinfo, batch);
731    const uint32_t mocs = iris_mocs(query_bo, &batch->screen->isl_dev, 0);
732    mi_builder_set_mocs(&b, mocs);
733 
734    iris_batch_sync_region_start(batch);
735 
736    struct mi_value result = calculate_result_on_gpu(devinfo, &b, q);
737    struct mi_value dst =
738       result_type <= PIPE_QUERY_TYPE_U32 ?
739       mi_mem32(rw_bo(dst_bo, offset, IRIS_DOMAIN_OTHER_WRITE)) :
740       mi_mem64(rw_bo(dst_bo, offset, IRIS_DOMAIN_OTHER_WRITE));
741 
742    if (predicated) {
743       mi_store(&b, mi_reg32(MI_PREDICATE_RESULT),
744                    mi_mem64(ro_bo(query_bo, snapshots_landed_offset)));
745       mi_store_if(&b, dst, result);
746    } else {
747       mi_store(&b, dst, result);
748    }
749 
750    iris_batch_sync_region_end(batch);
751 }
752 
753 static void
754 iris_set_active_query_state(struct pipe_context *ctx, bool enable)
755 {
756    struct iris_context *ice = (void *) ctx;
757 
758    if (ice->state.statistics_counters_enabled == enable)
759       return;
760 
761    // XXX: most packets aren't paying attention to this yet, because it'd
762    // have to be done dynamically at draw time, which is a pain
763    ice->state.statistics_counters_enabled = enable;
764    ice->state.dirty |= IRIS_DIRTY_CLIP |
765                        IRIS_DIRTY_RASTER |
766                        IRIS_DIRTY_STREAMOUT |
767                        IRIS_DIRTY_WM;
768    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_GS |
769                              IRIS_STAGE_DIRTY_TCS |
770                              IRIS_STAGE_DIRTY_TES |
771                              IRIS_STAGE_DIRTY_VS;
772 }
773 
774 static void
775 set_predicate_enable(struct iris_context *ice, bool value)
776 {
777    if (value)
778       ice->state.predicate = IRIS_PREDICATE_STATE_RENDER;
779    else
780       ice->state.predicate = IRIS_PREDICATE_STATE_DONT_RENDER;
781 }
782 
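/**
 * Compute the query result with MI_MATH and load it into
 * MI_PREDICATE_RESULT for conditional rendering on the render batch,
 * saving a copy to memory so compute dispatches can reload it.
 */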
783 static void
784 set_predicate_for_result(struct iris_context *ice,
785                          struct iris_query *q,
786                          bool inverted)
787 {
788    struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
789    struct iris_bo *bo = iris_resource_bo(q->query_state_ref.res);
790 
791    iris_batch_sync_region_start(batch);
792 
793    /* The CPU doesn't have the query result yet; use hardware predication */
794    ice->state.predicate = IRIS_PREDICATE_STATE_USE_BIT;
795 
796    /* Ensure the memory is coherent for MI_LOAD_REGISTER_* commands. */
797    iris_emit_pipe_control_flush(batch,
798                                 "conditional rendering: set predicate",
799                                 PIPE_CONTROL_FLUSH_ENABLE);
800    q->stalled = true;
801 
802    struct mi_builder b;
803    mi_builder_init(&b, batch->screen->devinfo, batch);
804    const uint32_t mocs = iris_mocs(bo, &batch->screen->isl_dev, 0);
805    mi_builder_set_mocs(&b, mocs);
806 
807    struct mi_value result;
808 
809    switch (q->type) {
810    case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
811       result = calc_overflow_for_stream(&b, q, q->index);
812       break;
813    case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
814       result = calc_overflow_any_stream(&b, q);
815       break;
816    default: {
817       /* PIPE_QUERY_OCCLUSION_* */
818       struct mi_value start =
819          query_mem64(q, offsetof(struct iris_query_snapshots, start));
820       struct mi_value end =
821          query_mem64(q, offsetof(struct iris_query_snapshots, end));
822       result = mi_isub(&b, end, start);
823       break;
824    }
825    }
826 
827    result = inverted ? mi_z(&b, result) : mi_nz(&b, result);
828    result = mi_iand(&b, result, mi_imm(1));
829 
830    /* We immediately set the predicate on the render batch, as all the
831     * counters come from 3D operations.  However, we may need to predicate
832     * a compute dispatch, which executes in a different GEM context and has
833     * a different MI_PREDICATE_RESULT register.  So, we save the result to
834     * memory and reload it in iris_launch_grid.
835     */
836    mi_value_ref(&b, result);
837    mi_store(&b, mi_reg32(MI_PREDICATE_RESULT), result);
838    mi_store(&b, query_mem64(q, offsetof(struct iris_query_snapshots,
839                                         predicate_result)), result);
840    ice->state.compute_predicate = bo;
841 
842    iris_batch_sync_region_end(batch);
843 }
844 
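/**
 * The pipe_context::render_condition hook: use the CPU result when it is
 * already available, otherwise fall back to GPU predication.
 */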
845 static void
846 iris_render_condition(struct pipe_context *ctx,
847                       struct pipe_query *query,
848                       bool condition,
849                       enum pipe_render_cond_flag mode)
850 {
851    struct iris_context *ice = (void *) ctx;
852    struct iris_query *q = (void *) query;
853 
854    /* The old condition isn't relevant; we'll update it if necessary */
855    ice->state.compute_predicate = NULL;
856 
857    if (!q) {
858       ice->state.predicate = IRIS_PREDICATE_STATE_RENDER;
859       return;
860    }
861 
862    iris_check_query_no_flush(ice, q);
863 
864    if (q->result || q->ready) {
865       set_predicate_enable(ice, (q->result != 0) ^ condition);
866    } else {
867       if (mode == PIPE_RENDER_COND_NO_WAIT ||
868           mode == PIPE_RENDER_COND_BY_REGION_NO_WAIT) {
869          perf_debug(&ice->dbg, "Conditional rendering demoted from "
870                     "\"no wait\" to \"wait\".");
871       }
872       set_predicate_for_result(ice, q, condition);
873    }
874 }
875 
876 void
877 genX(init_query)(struct iris_context *ice)
878 {
879    struct pipe_context *ctx = &ice->ctx;
880 
881    ctx->create_query = iris_create_query;
882    ctx->create_batch_query = iris_create_batch_query;
883    ctx->destroy_query = iris_destroy_query;
884    ctx->begin_query = iris_begin_query;
885    ctx->end_query = iris_end_query;
886    ctx->get_query_result = iris_get_query_result;
887    ctx->get_query_result_resource = iris_get_query_result_resource;
888    ctx->set_active_query_state = iris_set_active_query_state;
889    ctx->render_condition = iris_render_condition;
890 }
891