1 /*
2 * Copyright © 2018 Jonathan Marek <[email protected]>
3 * SPDX-License-Identifier: MIT
4 *
5 * Authors:
6 * Jonathan Marek <[email protected]>
7 * Rob Clark <[email protected]>
8 */
9
10 /* NOTE: perfcntrs are 48-bits but we only have 32-bit accumulate (?)
11 * so we work with 32-bits only. we accumulate start/stop separately,
12 * which differs from a5xx but works with only accumulate (no add/neg)
13 */
14
15 #include "freedreno_query_acc.h"
16 #include "freedreno_resource.h"
17
18 #include "fd2_context.h"
19 #include "fd2_query.h"
20
/* Per-query sample as laid out in the query result buffer.  The GPU
 * accumulates a counter snapshot into 'start' at resume and into 'stop'
 * at pause; the final result is stop - start (see
 * perfcntr_accumulate_result).  PACKED because the GPU writes at fixed
 * offsets computed with offsetof() below.
 */
struct PACKED fd2_query_sample {
   struct fd_acc_query_sample base;
   uint32_t start;   /* accumulated counter value at query resume */
   uint32_t stop;    /* accumulated counter value at query pause */
};
FD_DEFINE_CAST(fd_acc_query_sample, fd2_query_sample);
27
/* offset of a single field of an array of fd2_query_sample:
 * expands to the (bo, offset, orval, shift) argument tuple consumed by
 * OUT_RELOC() at the use sites below.
 */
#define query_sample_idx(aq, idx, field)                                       \
   fd_resource((aq)->prsc)->bo,                                                \
      (idx * sizeof(struct fd2_query_sample)) +                                \
         offsetof(struct fd2_query_sample, field),                             \
      0, 0

/* offset of a single field of fd2_query_sample (sample index 0): */
#define query_sample(aq, field) query_sample_idx(aq, 0, field)
37
38 /*
39 * Performance Counter (batch) queries:
40 *
 * Only one of these is active at a time, per design of the gallium
 * batch_query API.  One perfcntr query tracks N query_types,
43 * each of which has a 'fd_batch_query_entry' that maps it back to
44 * the associated group and counter.
45 */
46
/* Maps one tracked query_type back to its perfcntr group and countable: */
struct fd_batch_query_entry {
   uint8_t gid; /* group-id */
   uint8_t cid; /* countable-id within the group */
};
51
/* Driver-private state attached to a batch query (aq->query_data): */
struct fd_batch_query_data {
   struct fd_screen *screen;      /* screen owning the perfcntr group tables */
   unsigned num_query_entries;    /* number of entries in query_entries[] */
   struct fd_batch_query_entry query_entries[];
};
57
58 static void
perfcntr_resume(struct fd_acc_query * aq,struct fd_batch * batch)59 perfcntr_resume(struct fd_acc_query *aq, struct fd_batch *batch) assert_dt
60 {
61 struct fd_batch_query_data *data = aq->query_data;
62 struct fd_screen *screen = data->screen;
63 struct fd_ringbuffer *ring = batch->draw;
64
65 unsigned counters_per_group[screen->num_perfcntr_groups];
66 memset(counters_per_group, 0, sizeof(counters_per_group));
67
68 fd_wfi(batch, ring);
69
70 /* configure performance counters for the requested queries: */
71 for (unsigned i = 0; i < data->num_query_entries; i++) {
72 struct fd_batch_query_entry *entry = &data->query_entries[i];
73 const struct fd_perfcntr_group *g = &screen->perfcntr_groups[entry->gid];
74 unsigned counter_idx = counters_per_group[entry->gid]++;
75
76 assert(counter_idx < g->num_counters);
77
78 OUT_PKT0(ring, g->counters[counter_idx].select_reg, 1);
79 OUT_RING(ring, g->countables[entry->cid].selector);
80 }
81
82 memset(counters_per_group, 0, sizeof(counters_per_group));
83
84 /* and snapshot the start values */
85 for (unsigned i = 0; i < data->num_query_entries; i++) {
86 struct fd_batch_query_entry *entry = &data->query_entries[i];
87 const struct fd_perfcntr_group *g = &screen->perfcntr_groups[entry->gid];
88 unsigned counter_idx = counters_per_group[entry->gid]++;
89 const struct fd_perfcntr_counter *counter = &g->counters[counter_idx];
90
91 OUT_PKT3(ring, CP_REG_TO_MEM, 2);
92 OUT_RING(ring, counter->counter_reg_lo | CP_REG_TO_MEM_0_ACCUMULATE);
93 OUT_RELOC(ring, query_sample_idx(aq, i, start));
94 }
95 }
96
97 static void
perfcntr_pause(struct fd_acc_query * aq,struct fd_batch * batch)98 perfcntr_pause(struct fd_acc_query *aq, struct fd_batch *batch) assert_dt
99 {
100 struct fd_batch_query_data *data = aq->query_data;
101 struct fd_screen *screen = data->screen;
102 struct fd_ringbuffer *ring = batch->draw;
103
104 unsigned counters_per_group[screen->num_perfcntr_groups];
105 memset(counters_per_group, 0, sizeof(counters_per_group));
106
107 fd_wfi(batch, ring);
108
109 /* TODO do we need to bother to turn anything off? */
110
111 /* snapshot the end values: */
112 for (unsigned i = 0; i < data->num_query_entries; i++) {
113 struct fd_batch_query_entry *entry = &data->query_entries[i];
114 const struct fd_perfcntr_group *g = &screen->perfcntr_groups[entry->gid];
115 unsigned counter_idx = counters_per_group[entry->gid]++;
116 const struct fd_perfcntr_counter *counter = &g->counters[counter_idx];
117
118 OUT_PKT3(ring, CP_REG_TO_MEM, 2);
119 OUT_RING(ring, counter->counter_reg_lo | CP_REG_TO_MEM_0_ACCUMULATE);
120 OUT_RELOC(ring, query_sample_idx(aq, i, stop));
121 }
122 }
123
124 static void
perfcntr_accumulate_result(struct fd_acc_query * aq,struct fd_acc_query_sample * s,union pipe_query_result * result)125 perfcntr_accumulate_result(struct fd_acc_query *aq,
126 struct fd_acc_query_sample *s,
127 union pipe_query_result *result)
128 {
129 struct fd_batch_query_data *data = aq->query_data;
130 struct fd2_query_sample *sp = fd2_query_sample(s);
131
132 for (unsigned i = 0; i < data->num_query_entries; i++)
133 result->batch[i].u64 = sp[i].stop - sp[i].start;
134 }
135
/* Sample provider plugged into the shared freedreno accumulated-query
 * machinery (freedreno_query_acc.h), dispatching to the resume/pause/
 * result hooks above.
 * NOTE(review): '.always = true' semantics inferred from usage — confirm
 * against the fd_acc_sample_provider definition.
 */
static const struct fd_acc_sample_provider perfcntr = {
   .query_type = FD_QUERY_FIRST_PERFCNTR,
   .always = true,
   .resume = perfcntr_resume,
   .pause = perfcntr_pause,
   .result = perfcntr_accumulate_result,
};
143
144 static struct pipe_query *
fd2_create_batch_query(struct pipe_context * pctx,unsigned num_queries,unsigned * query_types)145 fd2_create_batch_query(struct pipe_context *pctx, unsigned num_queries,
146 unsigned *query_types)
147 {
148 struct fd_context *ctx = fd_context(pctx);
149 struct fd_screen *screen = ctx->screen;
150 struct fd_query *q;
151 struct fd_acc_query *aq;
152 struct fd_batch_query_data *data;
153
154 data = CALLOC_VARIANT_LENGTH_STRUCT(
155 fd_batch_query_data, num_queries * sizeof(data->query_entries[0]));
156
157 data->screen = screen;
158 data->num_query_entries = num_queries;
159
160 /* validate the requested query_types and ensure we don't try
161 * to request more query_types of a given group than we have
162 * counters:
163 */
164 unsigned counters_per_group[screen->num_perfcntr_groups];
165 memset(counters_per_group, 0, sizeof(counters_per_group));
166
167 for (unsigned i = 0; i < num_queries; i++) {
168 unsigned idx = query_types[i] - FD_QUERY_FIRST_PERFCNTR;
169
170 /* verify valid query_type, ie. is it actually a perfcntr? */
171 if ((query_types[i] < FD_QUERY_FIRST_PERFCNTR) ||
172 (idx >= screen->num_perfcntr_queries)) {
173 mesa_loge("invalid batch query query_type: %u", query_types[i]);
174 goto error;
175 }
176
177 struct fd_batch_query_entry *entry = &data->query_entries[i];
178 struct pipe_driver_query_info *pq = &screen->perfcntr_queries[idx];
179
180 entry->gid = pq->group_id;
181
182 /* the perfcntr_queries[] table flattens all the countables
183 * for each group in series, ie:
184 *
185 * (G0,C0), .., (G0,Cn), (G1,C0), .., (G1,Cm), ...
186 *
187 * So to find the countable index just step back through the
188 * table to find the first entry with the same group-id.
189 */
190 while (pq > screen->perfcntr_queries) {
191 pq--;
192 if (pq->group_id == entry->gid)
193 entry->cid++;
194 }
195
196 if (counters_per_group[entry->gid] >=
197 screen->perfcntr_groups[entry->gid].num_counters) {
198 mesa_loge("too many counters for group %u", entry->gid);
199 goto error;
200 }
201
202 counters_per_group[entry->gid]++;
203 }
204
205 q = fd_acc_create_query2(ctx, 0, 0, &perfcntr);
206 aq = fd_acc_query(q);
207
208 /* sample buffer size is based on # of queries: */
209 aq->size = num_queries * sizeof(struct fd2_query_sample);
210 aq->query_data = data;
211
212 return (struct pipe_query *)q;
213
214 error:
215 free(data);
216 return NULL;
217 }
218
219 void
fd2_query_context_init(struct pipe_context * pctx)220 fd2_query_context_init(struct pipe_context *pctx) disable_thread_safety_analysis
221 {
222 struct fd_context *ctx = fd_context(pctx);
223
224 ctx->create_query = fd_acc_create_query;
225 ctx->query_update_batch = fd_acc_query_update_batch;
226
227 pctx->create_batch_query = fd2_create_batch_query;
228 }
229