xref: /aosp_15_r20/external/mesa3d/src/gallium/drivers/freedreno/freedreno_query_hw.h (revision 6104692788411f58d303aa86923a9ff6ecaded22)
/*
 * Copyright © 2014 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

9 #ifndef FREEDRENO_QUERY_HW_H_
10 #define FREEDRENO_QUERY_HW_H_
11 
12 #include "util/list.h"
13 
14 #include "freedreno_context.h"
15 #include "freedreno_query.h"
16 
/*
 * HW Queries:
 *
 * See: https://gitlab.freedesktop.org/freedreno/freedreno/-/wikis/Queries#hardware-queries
 *
 * Hardware queries will be specific to gpu generation, but they need
 * some common infrastructure for triggering start/stop samples at
 * various points (for example, to exclude mem2gmem/gmem2mem or clear)
 * as well as per tile tracking.
 *
 * NOTE: in at least some cases hw writes sample values to memory addr
 * specified in some register.  So we don't really have the option to
 * just sample the same counter multiple times for multiple different
 * queries with the same query_type.  So we cache per sample provider
 * the most recent sample since the last draw.  This way multiple
 * sample periods for multiple queries can reference the same sample.
 *
 * fd_hw_sample_provider:
 *   - one per query type, registered/implemented by gpu generation
 *     specific code
 *   - can construct fd_hw_samples on demand
 *   - most recent sample (since last draw) cached so multiple
 *     different queries can ref the same sample
 *
 * fd_hw_sample:
 *   - abstracts one snapshot of counter value(s) across N tiles
 *   - backing object not allocated until submit time when number
 *     of samples and number of tiles is known
 *
 * fd_hw_sample_period:
 *   - consists of start and stop sample
 *   - a query accumulates a list of sample periods
 *   - the query result is the sum of the sample periods
 */
51 
/* Per-query-type vtable, registered by gpu-generation specific code
 * (see the HW Queries overview comment above).
 */
struct fd_hw_sample_provider {
   /* The query type (PIPE_QUERY_x / driver-specific value) this
    * provider implements.
    */
   unsigned query_type;

   /* Set if the provider should still count while !ctx->active_queries */
   bool always;

   /* Optional hook for enabling a counter.  Guaranteed to happen
    * at least once before the first ->get_sample() in a batch.
    */
   void (*enable)(struct fd_context *ctx, struct fd_ringbuffer *ring) dt;

   /* when a new sample is required, emit appropriate cmdstream
    * and return a sample object:
    */
   struct fd_hw_sample *(*get_sample)(struct fd_batch *batch,
                                      struct fd_ringbuffer *ring)dt;

   /* accumulate the results from specified sample period: */
   void (*accumulate_result)(struct fd_context *ctx, const void *start,
                             const void *end, union pipe_query_result *result);
};
73 
/* One refcounted snapshot of counter value(s), taken across all tiles
 * of a batch.  Shared between queries via fd_hw_sample_reference().
 */
struct fd_hw_sample {
   struct pipe_reference reference; /* keep this first */

   /* offset and size of the sample are known at the time the
    * sample is constructed.
    */
   uint32_t size;
   uint32_t offset;

   /* backing object, offset/stride/etc are determined not when
    * the sample is constructed, but when the batch is submitted.
    * This way we can defer allocation until total # of requested
    * samples, and total # of tiles, is known.
    */
   struct pipe_resource *prsc;
   uint32_t num_tiles;
   uint32_t tile_stride;
};
92 
struct fd_hw_sample_period;

/* A hw-query instance: wraps the generic fd_query and accumulates
 * sample periods from the registered provider.
 */
struct fd_hw_query {
   struct fd_query base;

   /* vtable for this query's type; see fd_hw_query_register_provider() */
   const struct fd_hw_sample_provider *provider;

   /* list of fd_hw_sample_periods: */
   struct list_head periods;

   /* if active and not paused, the current sample period (not
    * yet added to 'periods'):
    */
   struct fd_hw_sample_period *period;

   struct list_head list; /* list-node in batch->active_queries */
};
110 
/* Downcast a generic fd_query to the hw-query subclass.  Safe because
 * fd_hw_query embeds fd_query as its first member ('base').
 */
static inline struct fd_hw_query *
fd_hw_query(struct fd_query *q)
{
   struct fd_hw_query *hq = (struct fd_hw_query *)q;
   return hq;
}
116 
/* Construct a hw query of the given type/index; the matching sample
 * provider must have been registered (see fd_hw_query_register_provider).
 */
struct fd_query *fd_hw_create_query(struct fd_context *ctx, unsigned query_type,
                                    unsigned index);
/* helper for sample providers: */
struct fd_hw_sample *fd_hw_sample_init(struct fd_batch *batch, uint32_t size);
/* don't call directly, use fd_hw_sample_reference() */
void __fd_hw_sample_destroy(struct fd_context *ctx, struct fd_hw_sample *samp);
/* Called once per batch when num_tiles is known — presumably allocates
 * backing storage for the batch's samples (see fd_hw_sample comment).
 */
void fd_hw_query_prepare(struct fd_batch *batch, uint32_t num_tiles) assert_dt;
/* Per-tile setup for tile 'n' of the batch. */
void fd_hw_query_prepare_tile(struct fd_batch *batch, uint32_t n,
                              struct fd_ringbuffer *ring) assert_dt;
/* Start/stop sample periods for the batch's active queries; end_batch
 * signals the final update before submit.
 */
void fd_hw_query_update_batch(struct fd_batch *batch, bool end_batch) assert_dt;
/* Invoke providers' ->enable() hooks for the batch (see provider comment). */
void fd_hw_query_enable(struct fd_batch *batch,
                        struct fd_ringbuffer *ring) assert_dt;
/* Register the gpu-generation specific provider for its query_type. */
void
fd_hw_query_register_provider(struct pipe_context *pctx,
                              const struct fd_hw_sample_provider *provider);
/* Context init/teardown for the hw-query infrastructure. */
void fd_hw_query_init(struct pipe_context *pctx);
void fd_hw_query_fini(struct pipe_context *pctx);
134 
135 static inline void
fd_hw_sample_reference(struct fd_context * ctx,struct fd_hw_sample ** ptr,struct fd_hw_sample * samp)136 fd_hw_sample_reference(struct fd_context *ctx, struct fd_hw_sample **ptr,
137                        struct fd_hw_sample *samp)
138 {
139    struct fd_hw_sample *old_samp = *ptr;
140 
141    if (pipe_reference(&(*ptr)->reference, &samp->reference))
142       __fd_hw_sample_destroy(ctx, old_samp);
143    *ptr = samp;
144 }
145 
146 #endif /* FREEDRENO_QUERY_HW_H_ */
147