/*
 * Copyright © 2020 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ir3/ir3_compiler.h"

#include "util/u_math.h"

#include "adreno_pm4.xml.h"
#include "adreno_common.xml.h"
#include "a6xx.xml.h"

#include "common/freedreno_dev_info.h"

#include "ir3_asm.h"
#include "main.h"

#define FD_BO_NO_HARDPIN 1
#include "common/fd6_pack.h"

struct a6xx_backend {
   struct backend base;

   struct ir3_compiler *compiler;
   struct fd_device *dev;

   const struct fd_dev_info *info;

   unsigned seqno;
   struct fd_bo *control_mem;

   struct fd_bo *query_mem;
   const struct perfcntr *perfcntrs;
   unsigned num_perfcntrs;
};
define_cast(backend, a6xx_backend);

/*
 * Data structures shared with GPU:
 */

/* This struct defines the layout of the fd6_context::control buffer: */
struct fd6_control {
   uint32_t seqno; /* seqno for async CP_EVENT_WRITE, etc */
   uint32_t _pad0;
   volatile uint32_t vsc_overflow;
   uint32_t _pad1;
   /* flag set from cmdstream when VSC overflow detected: */
   uint32_t vsc_scratch;
   uint32_t _pad2;
   uint32_t _pad3;
   uint32_t _pad4;

   /* scratch space for VPC_SO[i].FLUSH_BASE_LO/HI, start on 32 byte boundary. */
   struct {
      uint32_t offset;
      uint32_t pad[7];
   } flush_base[4];
};

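/* Expands to the (bo, offset, or-val, shift) argument tuple that OUT_RELOC()
 * expects, pointing at a single member of the fd6_control buffer:
 */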
#define control_ptr(a6xx_backend, member) \
   (a6xx_backend)->control_mem, offsetof(struct fd6_control, member), 0, 0

struct PACKED fd6_query_sample {
   uint64_t start;
   uint64_t result;
   uint64_t stop;
};

/* offset of a single field of an array of fd6_query_sample: */
#define query_sample_idx(a6xx_backend, idx, field) \
   (a6xx_backend)->query_mem, \
      (idx * sizeof(struct fd6_query_sample)) + \
         offsetof(struct fd6_query_sample, field), \
      0, 0

/*
 * Backend implementation:
 */

static struct kernel *
a6xx_assemble(struct backend *b, FILE *in)
{
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);
   struct ir3_kernel *ir3_kernel = ir3_asm_assemble(a6xx_backend->compiler, in);
   ir3_kernel->backend = b;
   return &ir3_kernel->base;
}

static void
a6xx_disassemble(struct kernel *kernel, FILE *out)
{
   ir3_asm_disassemble(to_ir3_kernel(kernel), out);
}

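/* Emit the compute program state for a kernel: shader config/control
 * registers, workgroup-id and local-id sysval regids, the CP_LOAD_STATE6
 * shader preload, and private (scratch) memory setup if the variant needs it.
 */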
template<chip CHIP>
static void
cs_program_emit(struct fd_ringbuffer *ring, struct kernel *kernel)
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
   struct ir3_shader_variant *v = ir3_kernel->v;
   const unsigned *local_size = kernel->local_size;
   const struct ir3_info *i = &v->info;
   enum a6xx_threadsize thrsz = i->double_threadsize ? THREAD128 : THREAD64;

   OUT_REG(ring, A6XX_SP_MODE_CONTROL(.constant_demotion_enable = true,
                                      .isammode = ISAMMODE_GL,
                                      .shared_consts_enable = false));

   OUT_PKT4(ring, REG_A6XX_SP_PERFCTR_ENABLE, 1);
   OUT_RING(ring, A6XX_SP_PERFCTR_ENABLE_CS);

   OUT_PKT4(ring, REG_A6XX_SP_FLOAT_CNTL, 1);
   OUT_RING(ring, 0);

   for (size_t i = 0; i < ARRAY_SIZE(a6xx_backend->info->a6xx.magic_raw); i++) {
      auto magic_reg = a6xx_backend->info->a6xx.magic_raw[i];
      if (!magic_reg.reg)
         break;

      OUT_PKT4(ring, magic_reg.reg, 1);
      OUT_RING(ring, magic_reg.value);
   }

   OUT_REG(ring, HLSQ_INVALIDATE_CMD(CHIP,
      .vs_state = true,
      .hs_state = true,
      .ds_state = true,
      .gs_state = true,
      .fs_state = true,
      .cs_state = true,
      .gfx_ibo = true,
   ));

   unsigned constlen = align(v->constlen, 4);
   OUT_REG(ring, HLSQ_CS_CNTL(CHIP, .constlen = constlen, .enabled = true, ));

   OUT_PKT4(ring, REG_A6XX_SP_CS_CONFIG, 2);
   OUT_RING(ring, A6XX_SP_CS_CONFIG_ENABLED |
                     A6XX_SP_CS_CONFIG_NIBO(kernel->num_bufs) |
                     A6XX_SP_CS_CONFIG_NTEX(v->num_samp) |
                     A6XX_SP_CS_CONFIG_NSAMP(v->num_samp)); /* SP_CS_CONFIG */
   OUT_RING(ring, v->instrlen);                             /* SP_CS_INSTRLEN */

   OUT_PKT4(ring, REG_A6XX_SP_CS_CTRL_REG0, 1);
   OUT_RING(ring,
            A6XX_SP_CS_CTRL_REG0_THREADSIZE(thrsz) |
               A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(i->max_reg + 1) |
               A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(i->max_half_reg + 1) |
               COND(v->mergedregs, A6XX_SP_CS_CTRL_REG0_MERGEDREGS) |
               COND(v->early_preamble, A6XX_SP_CS_CTRL_REG0_EARLYPREAMBLE) |
               A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(ir3_shader_branchstack_hw(v)));

   if (CHIP == A7XX) {
      OUT_REG(ring, HLSQ_FS_CNTL_0(CHIP, .threadsize = THREAD64));

      OUT_REG(ring, HLSQ_CONTROL_2_REG(CHIP, .dword = 0xfcfcfcfc),
              HLSQ_CONTROL_3_REG(CHIP, .dword = 0xfcfcfcfc),
              HLSQ_CONTROL_4_REG(CHIP, .dword = 0xfcfcfcfc),
              HLSQ_CONTROL_5_REG(CHIP, .dword = 0x0000fc00), );
   }

   OUT_PKT4(ring, REG_A6XX_SP_CS_UNKNOWN_A9B1, 1);
   OUT_RING(ring, A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE(1) |
                     A6XX_SP_CS_UNKNOWN_A9B1_UNK6);

   if (CHIP == A6XX && a6xx_backend->info->a6xx.has_lpac) {
      OUT_PKT4(ring, REG_A6XX_HLSQ_CS_UNKNOWN_B9D0, 1);
      OUT_RING(ring, A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE(1) |
                        A6XX_HLSQ_CS_UNKNOWN_B9D0_UNK6);
   }

   uint32_t local_invocation_id, work_group_id;
   local_invocation_id =
      ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
   work_group_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_WORKGROUP_ID);

   if (CHIP == A6XX) {
      OUT_PKT4(ring, REG_A6XX_HLSQ_CS_CNTL_0, 2);
      OUT_RING(ring, A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
                        A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID(regid(63, 0)) |
                        A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID(regid(63, 0)) |
                        A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
      OUT_RING(ring, A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID(regid(63, 0)) |
                        A6XX_HLSQ_CS_CNTL_1_THREADSIZE(thrsz));
   } else {
      unsigned tile_height = (local_size[1] % 8 == 0)   ? 3
                             : (local_size[1] % 4 == 0) ? 5
                             : (local_size[1] % 2 == 0) ? 9
                                                        : 17;

      OUT_REG(ring,
              HLSQ_CS_CNTL_1(CHIP,
                             .linearlocalidregid = regid(63, 0),
                             .threadsize = thrsz,
                             .workgrouprastorderzfirsten = true,
                             .wgtilewidth = 4,
                             .wgtileheight = tile_height,
              ));
   }

   if (CHIP == A7XX || a6xx_backend->info->a6xx.has_lpac) {
      OUT_PKT4(ring, REG_A6XX_SP_CS_CNTL_0, 1);
      OUT_RING(ring, A6XX_SP_CS_CNTL_0_WGIDCONSTID(work_group_id) |
                        A6XX_SP_CS_CNTL_0_WGSIZECONSTID(regid(63, 0)) |
                        A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID(regid(63, 0)) |
                        A6XX_SP_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
      if (CHIP == A7XX) {
         /* TODO allow the shader to control the tiling */
         OUT_REG(ring,
                 SP_CS_CNTL_1(A7XX, .linearlocalidregid = regid(63, 0),
                              .threadsize = thrsz,
                              .workitemrastorder = WORKITEMRASTORDER_LINEAR));
      } else {
         OUT_REG(ring,
                 SP_CS_CNTL_1(CHIP, .linearlocalidregid = regid(63, 0),
                              .threadsize = thrsz));
      }
   }

   OUT_PKT4(ring, REG_A6XX_SP_CS_OBJ_START, 2);
   OUT_RELOC(ring, v->bo, 0, 0, 0); /* SP_CS_OBJ_START_LO/HI */

   OUT_PKT4(ring, REG_A6XX_SP_CS_INSTRLEN, 1);
   OUT_RING(ring, v->instrlen);

   OUT_PKT4(ring, REG_A6XX_SP_CS_OBJ_START, 2);
   OUT_RELOC(ring, v->bo, 0, 0, 0);

   uint32_t shader_preload_size =
      MIN2(v->instrlen, a6xx_backend->info->a6xx.instr_cache_size);
   OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
                     CP_LOAD_STATE6_0_NUM_UNIT(shader_preload_size));
   OUT_RELOC(ring, v->bo, 0, 0, 0);

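   /* Private ("scratch") memory: sized per fiber, scaled up to cover all
    * fibers on an SP (aligned to 4KiB), then replicated per SP core:
    */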
   if (v->pvtmem_size > 0) {
      uint32_t per_fiber_size = v->pvtmem_size;
      uint32_t per_sp_size =
         ALIGN(per_fiber_size * a6xx_backend->info->fibers_per_sp, 1 << 12);
      uint32_t total_size = per_sp_size * a6xx_backend->info->num_sp_cores;

      struct fd_bo *pvtmem = fd_bo_new(a6xx_backend->dev, total_size, 0, "pvtmem");
      OUT_PKT4(ring, REG_A6XX_SP_CS_PVT_MEM_PARAM, 4);
      OUT_RING(ring, A6XX_SP_CS_PVT_MEM_PARAM_MEMSIZEPERITEM(per_fiber_size));
      OUT_RELOC(ring, pvtmem, 0, 0, 0);
      OUT_RING(ring, A6XX_SP_CS_PVT_MEM_SIZE_TOTALPVTMEMSIZE(per_sp_size) |
                        COND(v->pvtmem_per_wave,
                             A6XX_SP_CS_PVT_MEM_SIZE_PERWAVEMEMLAYOUT));

      OUT_PKT4(ring, REG_A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET, 1);
      OUT_RING(ring, A6XX_SP_CS_PVT_MEM_HW_STACK_OFFSET_OFFSET(per_sp_size));
   }
}

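/* Upload constants directly in the cmdstream.  CP_LOAD_STATE6 works in units
 * of vec4, so the destination regid must be vec4 aligned and the payload is
 * zero-padded up to a multiple of 4 dwords.
 */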
template<chip CHIP>
static void
emit_const(struct fd_ringbuffer *ring, uint32_t regid, uint32_t sizedwords,
           const uint32_t *dwords)
{
   uint32_t align_sz;

   assert((regid % 4) == 0);

   align_sz = align(sizedwords, 4);

   OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3 + align_sz);
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(regid / 4) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
                     CP_LOAD_STATE6_0_NUM_UNIT(DIV_ROUND_UP(sizedwords, 4)));
   OUT_RING(ring, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   OUT_RING(ring, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

   for (uint32_t i = 0; i < sizedwords; i++) {
      OUT_RING(ring, dwords[i]);
   }

   /* Zero-pad to multiple of 4 dwords */
   for (uint32_t i = sizedwords; i < align_sz; i++) {
      OUT_RING(ring, 0);
   }
}

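/* Emit the kernel's immediate constants, patching in the grid dimensions
 * (numwg) and the iovas of the bound buffers at the const offsets the
 * assembler assigned for them:
 */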
template<chip CHIP>
static void
cs_const_emit(struct fd_ringbuffer *ring, struct kernel *kernel,
              uint32_t grid[3])
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct ir3_shader_variant *v = ir3_kernel->v;

   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t base = const_state->offsets.immediate;
   int size = DIV_ROUND_UP(const_state->immediates_count, 4);

   if (ir3_kernel->info.numwg != INVALID_REG) {
      assert((ir3_kernel->info.numwg & 0x3) == 0);
      int idx = ir3_kernel->info.numwg >> 2;
      const_state->immediates[idx * 4 + 0] = grid[0];
      const_state->immediates[idx * 4 + 1] = grid[1];
      const_state->immediates[idx * 4 + 2] = grid[2];
   }

   for (int i = 0; i < MAX_BUFS; i++) {
      if (kernel->buf_addr_regs[i] != INVALID_REG) {
         assert((kernel->buf_addr_regs[i] & 0x3) == 0);
         int idx = kernel->buf_addr_regs[i] >> 2;

         uint64_t iova = fd_bo_get_iova(kernel->bufs[i]);

         const_state->immediates[idx * 4 + 1] = iova >> 32;
         const_state->immediates[idx * 4 + 0] = (iova << 32) >> 32;
      }
   }

   /* truncate size to avoid writing constants that shader
    * does not use:
    */
   size = MIN2(size + base, v->constlen) - base;

   /* convert out of vec4: */
   base *= 4;
   size *= 4;

   if (size > 0) {
      emit_const<CHIP>(ring, base, size, const_state->immediates);
   }
}

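/* Emit the buffer descriptors (IBOs): each bound buffer is described with a
 * 16-dword texture-const style record, built in a streaming ringbuffer and
 * then referenced indirectly via CP_LOAD_STATE6 and SP_CS_IBO:
 */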
template<chip CHIP>
static void
cs_ibo_emit(struct fd_ringbuffer *ring, struct fd_submit *submit,
            struct kernel *kernel)
{
   struct fd_ringbuffer *state = fd_submit_new_ringbuffer(
      submit, kernel->num_bufs * 16 * 4, FD_RINGBUFFER_STREAMING);

   for (unsigned i = 0; i < kernel->num_bufs; i++) {
      /* size is encoded with low 15b in WIDTH and high bits in HEIGHT,
       * in units of elements:
       */
      unsigned sz = kernel->buf_sizes[i];
      unsigned width = sz & MASK(15);
      unsigned height = sz >> 15;

      OUT_RING(state, A6XX_TEX_CONST_0_FMT(FMT6_32_UINT) |
                         A6XX_TEX_CONST_0_TILE_MODE(TILE6_LINEAR));
      OUT_RING(state, A6XX_TEX_CONST_1_WIDTH(width) |
                         A6XX_TEX_CONST_1_HEIGHT(height));
      OUT_RING(state, A6XX_TEX_CONST_2_PITCH(0) |
                         A6XX_TEX_CONST_2_STRUCTSIZETEXELS(1) |
                         A6XX_TEX_CONST_2_TYPE(A6XX_TEX_BUFFER));
      OUT_RING(state, A6XX_TEX_CONST_3_ARRAY_PITCH(0));
      OUT_RELOC(state, kernel->bufs[i], 0, 0, 0);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
   }

   OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
                     CP_LOAD_STATE6_0_STATE_TYPE(ST6_IBO) |
                     CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
                     CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
                     CP_LOAD_STATE6_0_NUM_UNIT(kernel->num_bufs));
   OUT_RB(ring, state);

   OUT_PKT4(ring, REG_A6XX_SP_CS_IBO, 2);
   OUT_RB(ring, state);

   OUT_PKT4(ring, REG_A6XX_SP_CS_IBO_COUNT, 1);
   OUT_RING(ring, kernel->num_bufs);

   fd_ringbuffer_del(state);
}

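/* Write a VGT event to the cmdstream.  If a timestamp is requested, the CP
 * also writes an incremented seqno into the control buffer once the event
 * completes, which cache_flush() can then poll on:
 */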
template<chip CHIP>
static inline unsigned
event_write(struct fd_ringbuffer *ring, struct kernel *kernel,
            enum vgt_event_type evt, bool timestamp)
{
   unsigned seqno = 0;

   if (CHIP == A6XX) {
      OUT_PKT7(ring, CP_EVENT_WRITE, timestamp ? 4 : 1);
      OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(evt));
   } else {
      OUT_PKT7(ring, CP_EVENT_WRITE7, timestamp ? 4 : 1);
      OUT_RING(ring,
               CP_EVENT_WRITE7_0_EVENT(evt) |
                  COND(timestamp, CP_EVENT_WRITE7_0_WRITE_ENABLED |
                                     CP_EVENT_WRITE7_0_WRITE_SRC(EV_WRITE_USER_32B)));
   }

   if (timestamp) {
      struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
      struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
      seqno = ++a6xx_backend->seqno;
      OUT_RELOC(ring, control_ptr(a6xx_backend, seqno)); /* ADDR_LO/HI */
      OUT_RING(ring, seqno);
   }

   return seqno;
}

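/* Wait for the kernel to finish and for its memory writes to land: wait on
 * the RB_DONE_TS seqno, then flush caches (on a6xx by waiting on a
 * CACHE_FLUSH_TS seqno, on a7xx via the CACHE_FLUSH7 event):
 */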
template<chip CHIP>
static inline void
cache_flush(struct fd_ringbuffer *ring, struct kernel *kernel)
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
   unsigned seqno;

   seqno = event_write<CHIP>(ring, kernel, RB_DONE_TS, true);

   OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
   OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
                     CP_WAIT_REG_MEM_0_POLL(POLL_MEMORY));
   OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
   OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(seqno));
   OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(~0));
   OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));

   if (CHIP == A6XX) {
      seqno = event_write<CHIP>(ring, kernel, CACHE_FLUSH_TS, true);

      OUT_PKT7(ring, CP_WAIT_MEM_GTE, 4);
      OUT_RING(ring, CP_WAIT_MEM_GTE_0_RESERVED(0));
      OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
      OUT_RING(ring, CP_WAIT_MEM_GTE_3_REF(seqno));
   } else {
      event_write<CHIP>(ring, kernel, CACHE_FLUSH7, false);
   }
}

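/* Build and record the cmdstream for one grid launch: program state, consts,
 * IBOs, HLSQ_CS_NDRANGE setup, optional perfcounter sampling around the
 * dispatch, and a final cache flush:
 */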
template<chip CHIP>
static void
a6xx_emit_grid(struct kernel *kernel, uint32_t grid[3],
               struct fd_submit *submit)
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
   struct fd_ringbuffer *ring = fd_submit_new_ringbuffer(
      submit, 0,
      (enum fd_ringbuffer_flags)(FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE));

   cs_program_emit<CHIP>(ring, kernel);
   cs_const_emit<CHIP>(ring, kernel, grid);
   cs_ibo_emit<CHIP>(ring, submit, kernel);

   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));

   const unsigned *local_size = kernel->local_size;
   const unsigned *num_groups = grid;

   unsigned work_dim = 0;
   for (int i = 0; i < 3; i++) {
      if (!grid[i])
         break;
      work_dim++;
   }

   OUT_REG(ring, HLSQ_CS_NDRANGE_0(CHIP,
      .kerneldim = work_dim,
      .localsizex = local_size[0] - 1,
      .localsizey = local_size[1] - 1,
      .localsizez = local_size[2] - 1,
   ));
   if (CHIP == A7XX) {
      OUT_REG(ring, A7XX_HLSQ_CS_LOCAL_SIZE(.localsizex = local_size[0] - 1,
                                            .localsizey = local_size[1] - 1,
                                            .localsizez = local_size[2] - 1, ));
   }

   OUT_REG(ring, HLSQ_CS_NDRANGE_1(CHIP,
      .globalsize_x = local_size[0] * num_groups[0],
   ));
   OUT_REG(ring, HLSQ_CS_NDRANGE_2(CHIP, 0));
   OUT_REG(ring, HLSQ_CS_NDRANGE_3(CHIP,
      .globalsize_y = local_size[1] * num_groups[1],
   ));
   OUT_REG(ring, HLSQ_CS_NDRANGE_4(CHIP, 0));
   OUT_REG(ring, HLSQ_CS_NDRANGE_5(CHIP,
      .globalsize_z = local_size[2] * num_groups[2],
   ));
   OUT_REG(ring, HLSQ_CS_NDRANGE_6(CHIP, 0));

   OUT_REG(ring, HLSQ_CS_KERNEL_GROUP_X(CHIP, 1));
   OUT_REG(ring, HLSQ_CS_KERNEL_GROUP_Y(CHIP, 1));
   OUT_REG(ring, HLSQ_CS_KERNEL_GROUP_Z(CHIP, 1));

   if (a6xx_backend->num_perfcntrs > 0) {
      a6xx_backend->query_mem = fd_bo_new(
         a6xx_backend->dev,
         a6xx_backend->num_perfcntrs * sizeof(struct fd6_query_sample), 0, "query");

      /* configure the performance counters to count the requested
       * countables:
       */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

         OUT_PKT4(ring, counter->select_reg, 1);
         OUT_RING(ring, counter->selector);
      }

      OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);

      /* and snapshot the start values: */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

         OUT_PKT7(ring, CP_REG_TO_MEM, 3);
         OUT_RING(ring, CP_REG_TO_MEM_0_64B |
                           CP_REG_TO_MEM_0_REG(counter->counter_reg_lo));
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, start));
      }
   }

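   /* Kick off the actual compute dispatch: */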
   OUT_PKT7(ring, CP_EXEC_CS, 4);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, CP_EXEC_CS_1_NGROUPS_X(grid[0]));
   OUT_RING(ring, CP_EXEC_CS_2_NGROUPS_Y(grid[1]));
   OUT_RING(ring, CP_EXEC_CS_3_NGROUPS_Z(grid[2]));

   OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);

   if (a6xx_backend->num_perfcntrs > 0) {
      /* snapshot the end values: */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

         OUT_PKT7(ring, CP_REG_TO_MEM, 3);
         OUT_RING(ring, CP_REG_TO_MEM_0_64B |
                           CP_REG_TO_MEM_0_REG(counter->counter_reg_lo));
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, stop));
      }

      /* and compute the result: */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         /* result += stop - start: */
         OUT_PKT7(ring, CP_MEM_TO_MEM, 9);
         OUT_RING(ring, CP_MEM_TO_MEM_0_DOUBLE | CP_MEM_TO_MEM_0_NEG_C);
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, result)); /* dst */
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, result)); /* srcA */
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, stop));   /* srcB */
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, start));  /* srcC */
      }
   }

   cache_flush<CHIP>(ring, kernel);
}

static void
a6xx_set_perfcntrs(struct backend *b, const struct perfcntr *perfcntrs,
                   unsigned num_perfcntrs)
{
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);

   a6xx_backend->perfcntrs = perfcntrs;
   a6xx_backend->num_perfcntrs = num_perfcntrs;
}

static void
a6xx_read_perfcntrs(struct backend *b, uint64_t *results)
{
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);

   fd_bo_cpu_prep(a6xx_backend->query_mem, NULL, FD_BO_PREP_READ);
   struct fd6_query_sample *samples =
      (struct fd6_query_sample *)fd_bo_map(a6xx_backend->query_mem);

   for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
      results[i] = samples[i].result;
   }
}

template<chip CHIP>
struct backend *
a6xx_init(struct fd_device *dev, const struct fd_dev_id *dev_id)
{
   struct a6xx_backend *a6xx_backend =
      (struct a6xx_backend *)calloc(1, sizeof(*a6xx_backend));

   a6xx_backend->base = (struct backend){
      .assemble = a6xx_assemble,
      .disassemble = a6xx_disassemble,
      .emit_grid = a6xx_emit_grid<CHIP>,
      .set_perfcntrs = a6xx_set_perfcntrs,
      .read_perfcntrs = a6xx_read_perfcntrs,
   };

   struct ir3_compiler_options compiler_options = {};
   a6xx_backend->compiler =
      ir3_compiler_create(dev, dev_id, fd_dev_info_raw(dev_id), &compiler_options);
   a6xx_backend->dev = dev;

   a6xx_backend->info = fd_dev_info_raw(dev_id);

   a6xx_backend->control_mem = fd_bo_new(dev, 0x1000, 0, "control");

   return &a6xx_backend->base;
}

template
struct backend *a6xx_init<A6XX>(struct fd_device *dev, const struct fd_dev_id *dev_id);

template
struct backend *a6xx_init<A7XX>(struct fd_device *dev, const struct fd_dev_id *dev_id);