/*
 * Copyright © 2017 Rob Clark <[email protected]>
 * SPDX-License-Identifier: MIT
 *
 * Authors:
 *    Rob Clark <[email protected]>
 */

#include "pipe/p_state.h"

#include "freedreno_resource.h"

#include "fd5_compute.h"
#include "fd5_context.h"
#include "fd5_emit.h"

/* maybe move to fd5_program? */
static void
cs_program_emit(struct fd_context *ctx, struct fd_ringbuffer *ring,
                struct ir3_shader_variant *v) assert_dt
{
   const struct ir3_info *i = &v->info;
   enum a3xx_threadsize thrsz = i->double_threadsize ? FOUR_QUADS : TWO_QUADS;
   unsigned instrlen = v->instrlen;

   /* if shader is more than 32*16 instructions, don't preload it.  Similar
    * to the combined restriction of 64*16 for VS+FS
    */
   if (instrlen > 32)
      instrlen = 0;

   OUT_PKT4(ring, REG_A5XX_SP_SP_CNTL, 1);
   OUT_RING(ring, 0x00000000); /* SP_SP_CNTL */

   OUT_PKT4(ring, REG_A5XX_HLSQ_CONTROL_0_REG, 1);
   OUT_RING(ring, A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(TWO_QUADS) |
                     A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE(thrsz) |
                     0x00000880 /* XXX */);

   OUT_PKT4(ring, REG_A5XX_SP_CS_CTRL_REG0, 1);
   OUT_RING(ring,
            A5XX_SP_CS_CTRL_REG0_THREADSIZE(thrsz) |
               A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(i->max_half_reg + 1) |
               A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(i->max_reg + 1) |
               A5XX_SP_CS_CTRL_REG0_BRANCHSTACK(ir3_shader_branchstack_hw(v)) |
               COND(instrlen != 0, A5XX_SP_CS_CTRL_REG0_BUFFER) |
               0x2 /* XXX */);

   OUT_PKT4(ring, REG_A5XX_HLSQ_CS_CONFIG, 1);
   OUT_RING(ring, A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET(0) |
                     A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET(0) |
                     A5XX_HLSQ_CS_CONFIG_ENABLED);

   OUT_PKT4(ring, REG_A5XX_HLSQ_CS_CNTL, 1);
   OUT_RING(ring, A5XX_HLSQ_CS_CNTL_INSTRLEN(instrlen) |
                     COND(v->has_ssbo, A5XX_HLSQ_CS_CNTL_SSBO_ENABLE));

   OUT_PKT4(ring, REG_A5XX_SP_CS_CONFIG, 1);
   OUT_RING(ring, A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET(0) |
                     A5XX_SP_CS_CONFIG_SHADEROBJOFFSET(0) |
                     A5XX_SP_CS_CONFIG_ENABLED);

   assert(v->constlen % 4 == 0);
   unsigned constlen = v->constlen / 4;
   OUT_PKT4(ring, REG_A5XX_HLSQ_CS_CONSTLEN, 2);
   OUT_RING(ring, constlen); /* HLSQ_CS_CONSTLEN */
   OUT_RING(ring, instrlen); /* HLSQ_CS_INSTRLEN */

   fd5_emit_shader_obj(ctx, ring, v, REG_A5XX_SP_CS_OBJ_START_LO);

   OUT_PKT4(ring, REG_A5XX_HLSQ_UPDATE_CNTL, 1);
   OUT_RING(ring, 0x1f00000);

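   /* Find the registers that will receive the workgroup id and local
    * invocation id system values:
    */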
   uint32_t local_invocation_id, work_group_id;
   local_invocation_id =
      ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
   work_group_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_WORKGROUP_ID);

   OUT_PKT4(ring, REG_A5XX_HLSQ_CS_CNTL_0, 2);
   OUT_RING(ring, A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
                     A5XX_HLSQ_CS_CNTL_0_UNK0(regid(63, 0)) |
                     A5XX_HLSQ_CS_CNTL_0_UNK1(regid(63, 0)) |
                     A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
   OUT_RING(ring, 0x1); /* HLSQ_CS_CNTL_1 */

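   /* The shader body is emitted inline only when it was small enough to
    * preload (instrlen was zeroed above otherwise):
    */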
   if (instrlen > 0)
      fd5_emit_shader(ring, v);
}

static void
fd5_launch_grid(struct fd_context *ctx,
                const struct pipe_grid_info *info) assert_dt
{
   struct ir3_shader_key key = {};
   struct ir3_shader_variant *v;
   struct fd_ringbuffer *ring = ctx->batch->draw;
   unsigned nglobal = 0;

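   /* Look up (compiling it if necessary) the compute shader variant for the
    * default shader key:
    */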
   v =
      ir3_shader_variant(ir3_get_shader(ctx->compute), key, false, &ctx->debug);
   if (!v)
      return;

   if (ctx->dirty_shader[PIPE_SHADER_COMPUTE] & FD_DIRTY_SHADER_PROG)
      cs_program_emit(ctx, ring, v);

   fd5_emit_cs_state(ctx, ring, v);
   fd5_emit_cs_consts(v, ring, ctx, info);

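   /* Count the enabled global buffer bindings: */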
   u_foreach_bit (i, ctx->global_bindings.enabled_mask)
      nglobal++;

   if (nglobal > 0) {
      /* global resources don't otherwise get an OUT_RELOC(), since
       * the raw ptr address is emitted in ir3_emit_cs_consts().
       * So to make the kernel aware that these buffers are referenced
       * by the batch, emit dummy relocs as part of a no-op packet
       * payload:
       */
      OUT_PKT7(ring, CP_NOP, 2 * nglobal);
      u_foreach_bit (i, ctx->global_bindings.enabled_mask) {
         struct pipe_resource *prsc = ctx->global_bindings.buf[i];
         OUT_RELOC(ring, fd_resource(prsc)->bo, 0, 0, 0);
      }
   }

   const unsigned *local_size =
      info->block; // v->shader->nir->info->workgroup_size;
   const unsigned *num_groups = info->grid;
   /* for some reason, mesa/st doesn't set info->work_dim, so just assume 3: */
   const unsigned work_dim = info->work_dim ? info->work_dim : 3;
   OUT_PKT4(ring, REG_A5XX_HLSQ_CS_NDRANGE_0, 7);
   OUT_RING(ring, A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM(work_dim) |
                     A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(local_size[0] - 1) |
                     A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(local_size[1] - 1) |
                     A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(local_size[2] - 1));
   OUT_RING(ring,
            A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(local_size[0] * num_groups[0]));
   OUT_RING(ring, 0); /* HLSQ_CS_NDRANGE_2_GLOBALOFF_X */
   OUT_RING(ring,
            A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(local_size[1] * num_groups[1]));
   OUT_RING(ring, 0); /* HLSQ_CS_NDRANGE_4_GLOBALOFF_Y */
   OUT_RING(ring,
            A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(local_size[2] * num_groups[2]));
   OUT_RING(ring, 0); /* HLSQ_CS_NDRANGE_6_GLOBALOFF_Z */

   OUT_PKT4(ring, REG_A5XX_HLSQ_CS_KERNEL_GROUP_X, 3);
   OUT_RING(ring, 1); /* HLSQ_CS_KERNEL_GROUP_X */
   OUT_RING(ring, 1); /* HLSQ_CS_KERNEL_GROUP_Y */
   OUT_RING(ring, 1); /* HLSQ_CS_KERNEL_GROUP_Z */

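   /* Indirect dispatch reads the workgroup counts from a GPU buffer at
    * dispatch time; otherwise the counts are encoded directly in the
    * CP_EXEC_CS packet:
    */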
   if (info->indirect) {
      struct fd_resource *rsc = fd_resource(info->indirect);

      fd5_emit_flush(ctx, ring);

      OUT_PKT7(ring, CP_EXEC_CS_INDIRECT, 4);
      OUT_RING(ring, 0x00000000);
      OUT_RELOC(ring, rsc->bo, info->indirect_offset, 0, 0); /* ADDR_LO/HI */
      OUT_RING(ring,
               A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
                  A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
                  A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
   } else {
      OUT_PKT7(ring, CP_EXEC_CS, 4);
      OUT_RING(ring, 0x00000000);
      OUT_RING(ring, CP_EXEC_CS_1_NGROUPS_X(info->grid[0]));
      OUT_RING(ring, CP_EXEC_CS_2_NGROUPS_Y(info->grid[1]));
      OUT_RING(ring, CP_EXEC_CS_3_NGROUPS_Z(info->grid[2]));
   }
}

void
fd5_compute_init(struct pipe_context *pctx) disable_thread_safety_analysis
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->launch_grid = fd5_launch_grid;
   pctx->create_compute_state = ir3_shader_compute_state_create;
   pctx->delete_compute_state = ir3_shader_state_delete;
}