/*
 * Copyright © 2019 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#include "drm/freedreno_ringbuffer.h"
#define FD_BO_NO_HARDPIN 1

#include "pipe/p_state.h"
#include "util/u_dump.h"
#include "u_tracepoints.h"

#include "freedreno_resource.h"
#include "freedreno_tracepoints.h"

#include "fd6_barrier.h"
#include "fd6_compute.h"
#include "fd6_const.h"
#include "fd6_context.h"
#include "fd6_emit.h"
#include "fd6_pack.h"

/* maybe move to fd6_program? */
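/* Emit the program state for a compute shader variant: shader config,
 * threadsize/workgroup-id setup, and the shader itself (via fd6_emit_shader).
 */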
template <chip CHIP>
static void
cs_program_emit(struct fd_context *ctx, struct fd_ringbuffer *ring,
                struct ir3_shader_variant *v)
   assert_dt
{
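   /* Invalidate previously emitted shader and IBO state before programming
    * the new compute state:
    */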
   OUT_REG(ring, HLSQ_INVALIDATE_CMD(CHIP, .vs_state = true, .hs_state = true,
                                     .ds_state = true, .gs_state = true,
                                     .fs_state = true, .cs_state = true,
                                     .cs_ibo = true, .gfx_ibo = true, ));

   OUT_REG(ring, HLSQ_CS_CNTL(
         CHIP,
         .constlen = v->constlen,
         .enabled = true,
   ));

   OUT_PKT4(ring, REG_A6XX_SP_CS_CONFIG, 1);
   OUT_RING(ring, A6XX_SP_CS_CONFIG_ENABLED |
                  COND(v->bindless_tex, A6XX_SP_CS_CONFIG_BINDLESS_TEX) |
                  COND(v->bindless_samp, A6XX_SP_CS_CONFIG_BINDLESS_SAMP) |
                  COND(v->bindless_ibo, A6XX_SP_CS_CONFIG_BINDLESS_IBO) |
                  COND(v->bindless_ubo, A6XX_SP_CS_CONFIG_BINDLESS_UBO) |
                  A6XX_SP_CS_CONFIG_NIBO(ir3_shader_nibo(v)) |
                  A6XX_SP_CS_CONFIG_NTEX(v->num_samp) |
                  A6XX_SP_CS_CONFIG_NSAMP(v->num_samp)); /* SP_CS_CONFIG */

   uint32_t local_invocation_id, work_group_id;
   local_invocation_id =
      ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
   work_group_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_WORKGROUP_ID);

   /*
    * Devices that do not support double threadsize take the threadsize from
    * A6XX_HLSQ_FS_CNTL_0_THREADSIZE instead of A6XX_HLSQ_CS_CNTL_1_THREADSIZE,
    * which is always set to THREAD128.
    */
   enum a6xx_threadsize thrsz = v->info.double_threadsize ? THREAD128 : THREAD64;
   enum a6xx_threadsize thrsz_cs = ctx->screen->info->a6xx
         .supports_double_threadsize ? thrsz : THREAD128;

   if (CHIP == A6XX) {
      OUT_PKT4(ring, REG_A6XX_HLSQ_CS_CNTL_0, 2);
      OUT_RING(ring, A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
                     A6XX_HLSQ_CS_CNTL_0_WGSIZECONSTID(regid(63, 0)) |
                     A6XX_HLSQ_CS_CNTL_0_WGOFFSETCONSTID(regid(63, 0)) |
                     A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
      OUT_RING(ring, A6XX_HLSQ_CS_CNTL_1_LINEARLOCALIDREGID(regid(63, 0)) |
                     A6XX_HLSQ_CS_CNTL_1_THREADSIZE(thrsz_cs));
      if (!ctx->screen->info->a6xx.supports_double_threadsize) {
         OUT_PKT4(ring, REG_A6XX_HLSQ_FS_CNTL_0, 1);
         OUT_RING(ring, A6XX_HLSQ_FS_CNTL_0_THREADSIZE(thrsz));
      }

      if (ctx->screen->info->a6xx.has_lpac) {
         OUT_PKT4(ring, REG_A6XX_SP_CS_CNTL_0, 2);
         OUT_RING(ring, A6XX_SP_CS_CNTL_0_WGIDCONSTID(work_group_id) |
                        A6XX_SP_CS_CNTL_0_WGSIZECONSTID(regid(63, 0)) |
                        A6XX_SP_CS_CNTL_0_WGOFFSETCONSTID(regid(63, 0)) |
                        A6XX_SP_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
         OUT_RING(ring, A6XX_SP_CS_CNTL_1_LINEARLOCALIDREGID(regid(63, 0)) |
                        A6XX_SP_CS_CNTL_1_THREADSIZE(thrsz));
      }
   } else {
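      /* A7XX path: choose a wgtileheight encoding based on whether the y
       * local-size is a multiple of 8/4/2:
       */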
      unsigned tile_height = (v->local_size[1] % 8 == 0)   ? 3
                             : (v->local_size[1] % 4 == 0) ? 5
                             : (v->local_size[1] % 2 == 0) ? 9
                                                           : 17;

      OUT_REG(ring,
              HLSQ_CS_CNTL_1(
                    CHIP,
                    .linearlocalidregid = regid(63, 0),
                    .threadsize = thrsz_cs,
                    .workgrouprastorderzfirsten = true,
                    .wgtilewidth = 4,
                    .wgtileheight = tile_height,
              )
      );

      OUT_REG(ring, HLSQ_FS_CNTL_0(CHIP, .threadsize = THREAD64));
      OUT_REG(ring,
              A6XX_SP_CS_CNTL_0(
                    .wgidconstid = work_group_id,
                    .wgsizeconstid = INVALID_REG,
                    .wgoffsetconstid = INVALID_REG,
                    .localidregid = local_invocation_id,
              )
      );
      OUT_REG(ring,
              SP_CS_CNTL_1(
                    CHIP,
                    .linearlocalidregid = INVALID_REG,
                    .threadsize = thrsz_cs,
                    .workitemrastorder =
                          v->cs.force_linear_dispatch ? WORKITEMRASTORDER_LINEAR
                                                      : WORKITEMRASTORDER_TILED,
              )
      );
      OUT_REG(ring,
              A7XX_HLSQ_CS_LOCAL_SIZE(
                    .localsizex = v->local_size[0] - 1,
                    .localsizey = v->local_size[1] - 1,
                    .localsizez = v->local_size[2] - 1,
              )
      );
      OUT_REG(ring, A7XX_SP_CS_UNKNOWN_A9BE(0)); // Sometimes is 0x08000000
   }

   fd6_emit_shader<CHIP>(ctx, ring, v);
}

template <chip CHIP>
static void
fd6_launch_grid(struct fd_context *ctx, const struct pipe_grid_info *info) in_dt
{
   struct fd6_compute_state *cs = (struct fd6_compute_state *)ctx->compute;
   struct fd_ringbuffer *ring = ctx->batch->draw;

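   /* On first use, compile the shader variant and bake the static program
    * state into a stateobj that is reused for subsequent launches:
    */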
   if (unlikely(!cs->v)) {
      struct ir3_shader_state *hwcso = (struct ir3_shader_state *)cs->hwcso;
      struct ir3_shader_key key = {};

      cs->v = ir3_shader_variant(ir3_get_shader(hwcso), key, false, &ctx->debug);
      if (!cs->v)
         return;

      cs->stateobj = fd_ringbuffer_new_object(ctx->pipe, 0x1000);
      cs_program_emit<CHIP>(ctx, cs->stateobj, cs->v);

      cs->user_consts_cmdstream_size = fd6_user_consts_cmdstream_size(cs->v);
   }

   trace_start_compute(&ctx->batch->trace, ring, !!info->indirect, info->work_dim,
                       info->block[0], info->block[1], info->block[2],
                       info->grid[0], info->grid[1], info->grid[2],
                       cs->v->shader_id);

   if (ctx->batch->barrier)
      fd6_barrier_flush<CHIP>(ctx->batch);

   bool emit_instrlen_workaround =
      cs->v->instrlen > ctx->screen->info->a6xx.instr_cache_size;

   /* There appears to be a HW bug where, in some rare circumstances, the HW
    * accidentally uses the FS instrlen instead of the CS instrlen; this
    * affects all known gens. Based on various experiments the issue seems to
    * be that when prefetching a branch destination misses the instruction
    * cache, the HW bounds-checks the memory fetch against SP_CS_INSTRLEN,
    * except that when one of the two register contexts is active it
    * accidentally fetches SP_FS_INSTRLEN from the other (inactive) context.
    * To work around it, we set the FS instrlen here and emit a dummy event to
    * roll the context (because it fetches SP_FS_INSTRLEN from the "wrong"
    * context). Because the bug seems to involve cache misses, we don't emit
    * this if the entire CS program fits in the cache, which will hopefully be
    * the majority of cases.
    *
    * See https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/19023
    */
   if (emit_instrlen_workaround) {
      OUT_REG(ring, A6XX_SP_FS_INSTRLEN(cs->v->instrlen));
      fd6_event_write<CHIP>(ctx, ring, FD_LABEL);
   }

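   /* Re-emit any context state, user constants, and driver params that have
    * been dirtied since the last dispatch:
    */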
   if (ctx->gen_dirty)
      fd6_emit_cs_state<CHIP>(ctx, ring, cs);

   if (ctx->gen_dirty & BIT(FD6_GROUP_CONST))
      fd6_emit_cs_user_consts(ctx, ring, cs);

   if (cs->v->need_driver_params || info->input)
      fd6_emit_cs_driver_params(ctx, ring, cs, info);

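   /* Switch the CP marker to compute mode before emitting the dispatch: */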
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));

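   /* Program the shared (local) memory size, covering both the variant's
    * compile-time requirement and any variable shared memory:
    */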
   uint32_t shared_size =
      MAX2(((int)(cs->v->cs.req_local_mem + info->variable_shared_mem) - 1) / 1024, 1);
   OUT_PKT4(ring, REG_A6XX_SP_CS_UNKNOWN_A9B1, 1);
   OUT_RING(ring, A6XX_SP_CS_UNKNOWN_A9B1_SHARED_SIZE(shared_size) |
                  A6XX_SP_CS_UNKNOWN_A9B1_UNK6);

   if (ctx->screen->info->a6xx.has_lpac) {
      OUT_PKT4(ring, REG_A6XX_HLSQ_CS_UNKNOWN_B9D0, 1);
      OUT_RING(ring, A6XX_HLSQ_CS_UNKNOWN_B9D0_SHARED_SIZE(shared_size) |
                     A6XX_HLSQ_CS_UNKNOWN_B9D0_UNK6);
   }

   const unsigned *local_size =
      info->block; // v->shader->nir->info->workgroup_size;
   const unsigned *num_groups = info->grid;
   /* for some reason, mesa/st doesn't set info->work_dim, so just assume 3: */
   const unsigned work_dim = info->work_dim ? info->work_dim : 3;

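   /* Program the ND-range: per-dimension local size and total global size
    * (global offsets are left at zero):
    */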
   OUT_REG(ring,
           HLSQ_CS_NDRANGE_0(
                 CHIP,
                 .kerneldim = work_dim,
                 .localsizex = local_size[0] - 1,
                 .localsizey = local_size[1] - 1,
                 .localsizez = local_size[2] - 1,
           ),
           HLSQ_CS_NDRANGE_1(
                 CHIP,
                 .globalsize_x = local_size[0] * num_groups[0],
           ),
           HLSQ_CS_NDRANGE_2(CHIP, .globaloff_x = 0),
           HLSQ_CS_NDRANGE_3(
                 CHIP,
                 .globalsize_y = local_size[1] * num_groups[1],
           ),
           HLSQ_CS_NDRANGE_4(CHIP, .globaloff_y = 0),
           HLSQ_CS_NDRANGE_5(
                 CHIP,
                 .globalsize_z = local_size[2] * num_groups[2],
           ),
           HLSQ_CS_NDRANGE_6(CHIP, .globaloff_z = 0),
   );

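   /* Kernel group counts are left at 1 in each dimension; the grid size is
    * expressed via the NDRANGE / NGROUPS values instead:
    */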
   OUT_REG(ring,
           HLSQ_CS_KERNEL_GROUP_X(CHIP, 1),
           HLSQ_CS_KERNEL_GROUP_Y(CHIP, 1),
           HLSQ_CS_KERNEL_GROUP_Z(CHIP, 1),
   );

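   /* For indirect dispatch the workgroup counts are read from a BO at the
    * given offset, but the local size is still supplied in the packet:
    */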
   if (info->indirect) {
      struct fd_resource *rsc = fd_resource(info->indirect);

      OUT_PKT7(ring, CP_EXEC_CS_INDIRECT, 4);
      OUT_RING(ring, 0x00000000);
      OUT_RELOC(ring, rsc->bo, info->indirect_offset, 0, 0); /* ADDR_LO/HI */
      OUT_RING(ring,
               A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
               A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
               A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
   } else {
      OUT_PKT7(ring, CP_EXEC_CS, 4);
      OUT_RING(ring, 0x00000000);
      OUT_RING(ring, CP_EXEC_CS_1_NGROUPS_X(info->grid[0]));
      OUT_RING(ring, CP_EXEC_CS_2_NGROUPS_Y(info->grid[1]));
      OUT_RING(ring, CP_EXEC_CS_3_NGROUPS_Z(info->grid[2]));
   }

   trace_end_compute(&ctx->batch->trace, ring);

   fd_context_all_clean(ctx);
}

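/* Note: the shader variant is not compiled at CSO creation time; that is
 * deferred to the first launch_grid (see above).
 */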
static void *
fd6_compute_state_create(struct pipe_context *pctx,
                         const struct pipe_compute_state *cso)
{
   struct fd6_compute_state *hwcso =
      (struct fd6_compute_state *)calloc(1, sizeof(*hwcso));
   hwcso->hwcso = ir3_shader_compute_state_create(pctx, cso);
   return hwcso;
}

static void
fd6_compute_state_delete(struct pipe_context *pctx, void *_hwcso)
{
   struct fd6_compute_state *hwcso = (struct fd6_compute_state *)_hwcso;
   ir3_shader_state_delete(pctx, hwcso->hwcso);
   if (hwcso->stateobj)
      fd_ringbuffer_del(hwcso->stateobj);
   free(hwcso);
}

static void
fd6_get_compute_state_info(struct pipe_context *pctx, void *cso,
                           struct pipe_compute_state_object_info *info)
{
   static struct ir3_shader_key key; /* static is implicitly zeroed */
   struct fd6_compute_state *cs = (struct fd6_compute_state *)cso;
   struct ir3_shader_state *hwcso = (struct ir3_shader_state *)cs->hwcso;
   struct ir3_shader_variant *v =
      ir3_shader_variant(ir3_get_shader(hwcso), key, false, &pctx->debug);
   struct fd_context *ctx = fd_context(pctx);
   uint32_t threadsize_base = ctx->screen->info->threadsize_base;

   info->max_threads = threadsize_base * ctx->screen->info->max_waves;
   info->simd_sizes = threadsize_base;
   info->preferred_simd_size = threadsize_base;

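   /* When both the HW and the compiled variant support double threadsize,
    * report the doubled wave width as well:
    */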
   if (ctx->screen->info->a6xx.supports_double_threadsize &&
       v->info.double_threadsize) {
      info->max_threads *= 2;
      info->simd_sizes |= (threadsize_base * 2);
      info->preferred_simd_size *= 2;
   }

   info->private_memory = v->pvtmem_size;
}

template <chip CHIP>
void
fd6_compute_init(struct pipe_context *pctx)
   disable_thread_safety_analysis
{
   struct fd_context *ctx = fd_context(pctx);

   ctx->launch_grid = fd6_launch_grid<CHIP>;
   pctx->create_compute_state = fd6_compute_state_create;
   pctx->delete_compute_state = fd6_compute_state_delete;
   pctx->get_compute_state_info = fd6_get_compute_state_info;
}
FD_GENX(fd6_compute_init);