/*
 * Copyright (c) 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_nir.h"
#include "compiler/nir/nir_builder.h"

struct lower_intrinsics_state {
   nir_shader *nir;
   nir_function_impl *impl;
   bool progress;
   bool hw_generated_local_id;
   nir_builder builder;

   /* Per-block cached values. */
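   /* hw_index points at a local_invocation_index definition that already
    * comes from the thread payload (mesh/task shaders) and is kept as-is.
    * local_index and local_id hold the values computed for this block;
    * local_id stays NULL when the hardware generates it.
    */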
   bool computed;
   nir_def *hw_index;
   nir_def *local_index;
   nir_def *local_id;
};

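/* Compute the local invocation index and ID for the current block and cache
 * them in the state.  This is done lazily, at most once per block, when the
 * first intrinsic that needs them is seen.
 */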
static void
compute_local_index_id(struct lower_intrinsics_state *state,
                       nir_intrinsic_instr *current)
{
   assert(!state->computed);
   state->hw_index = NULL;
   state->local_index = NULL;
   state->local_id = NULL;
   state->computed = true;

   nir_shader *nir = state->nir;
   nir_builder *b = &state->builder;

   if (!nir->info.workgroup_size_variable) {
      /* Don't calculate anything for a single invocation workgroup. */
      const uint16_t *ws = nir->info.workgroup_size;
      if (ws[0] * ws[1] * ws[2] == 1) {
         nir_def *zero = nir_imm_int(b, 0);
         state->local_index = zero;
         state->local_id = nir_replicate(b, zero, 3);
         return;
      }

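      /* The hardware provides the local invocation ID directly, so only the
       * flat index needs to be derived from it:
       *    index = z * size_x * size_y + y * size_x + x
       * load_local_invocation_id itself is left for the backend to lower.
       */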
      if (state->hw_generated_local_id) {
         assert(nir->info.derivative_group != DERIVATIVE_GROUP_QUADS);

         nir_def *local_id_vec = nir_load_local_invocation_id(b);
         nir_def *local_id[3] = { nir_channel(b, local_id_vec, 0),
                                  nir_channel(b, local_id_vec, 1),
                                  nir_channel(b, local_id_vec, 2) };
         nir_def *size_x = nir_imm_int(b, nir->info.workgroup_size[0]);
         nir_def *size_y = nir_imm_int(b, nir->info.workgroup_size[1]);

         nir_def *local_index = nir_imul(b, local_id[2], nir_imul(b, size_x, size_y));
         local_index = nir_iadd(b, local_index, nir_imul(b, local_id[1], size_x));
         local_index = nir_iadd(b, local_index, local_id[0]);

         state->local_index = local_index;
         state->local_id = NULL;
         return;
      }
   }

   /* Linear index.  Depending on the heuristic or the derivative group, it
    * will need to be processed to become the actual local_index.
    */
   nir_def *linear;

   if (nir->info.stage == MESA_SHADER_MESH || nir->info.stage == MESA_SHADER_TASK) {
      /* The thread payload provides a linear index; keep track of it so it
       * doesn't get removed.
       */
      state->hw_index =
         current->intrinsic == nir_intrinsic_load_local_invocation_index ?
         &current->def : nir_load_local_invocation_index(b);
      linear = state->hw_index;
   } else {
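      /* Build a flat index from the subgroup ID and this invocation's
       * position within the subgroup.
       */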
      nir_def *subgroup_id = nir_load_subgroup_id(b);
      nir_def *thread_local_id =
         nir_imul(b, subgroup_id, nir_load_simd_width_intel(b));
      nir_def *channel = nir_load_subgroup_invocation(b);
      linear = nir_iadd(b, channel, thread_local_id);
   }

   nir_def *size_x;
   nir_def *size_y;
   if (nir->info.workgroup_size_variable) {
      nir_def *size_xyz = nir_load_workgroup_size(b);
      size_x = nir_channel(b, size_xyz, 0);
      size_y = nir_channel(b, size_xyz, 1);
   } else {
      size_x = nir_imm_int(b, nir->info.workgroup_size[0]);
      size_y = nir_imm_int(b, nir->info.workgroup_size[1]);
   }
   nir_def *size_xy = nir_imul(b, size_x, size_y);

   /* The local invocation index and ID must respect the following
    *
    *    gl_LocalInvocationID.x =
    *       gl_LocalInvocationIndex % gl_WorkGroupSize.x;
    *    gl_LocalInvocationID.y =
    *       (gl_LocalInvocationIndex / gl_WorkGroupSize.x) %
    *       gl_WorkGroupSize.y;
    *    gl_LocalInvocationID.z =
    *       (gl_LocalInvocationIndex /
    *        (gl_WorkGroupSize.x * gl_WorkGroupSize.y)) %
    *       gl_WorkGroupSize.z;
    *
    * However, the final % gl_WorkGroupSize.z does nothing unless we
    * accidentally end up with a gl_LocalInvocationIndex that is too
    * large, so it can safely be omitted.
    */

   nir_def *id_x, *id_y, *id_z;
   switch (nir->info.derivative_group) {
   case DERIVATIVE_GROUP_NONE:
      if (nir->info.num_images == 0 &&
          nir->info.num_textures == 0) {
         /* X-major lid order. Optimal for linear accesses only,
          * which are usually buffers. X,Y ordering will look like:
          * (0,0) (1,0) (2,0) ... (size_x-1,0) (0,1) (1,1) ...
          */
         id_x = nir_umod(b, linear, size_x);
         id_y = nir_umod(b, nir_udiv(b, linear, size_x), size_y);
         state->local_index = linear;
      } else if (!nir->info.workgroup_size_variable &&
                 nir->info.workgroup_size[1] % 4 == 0) {
         /* 1x4 block X-major lid order. Same as X-major except increments in
          * blocks of width=1 height=4. Always optimal for tileY and usually
          * optimal for linear accesses.
          *   x = (linear / 4) % size_x
          *   y = ((linear % 4) + (linear / 4 / size_x) * 4) % size_y
          * X,Y ordering will look like: (0,0) (0,1) (0,2) (0,3) (1,0) (1,1)
          * (1,2) (1,3) (2,0) ... (size_x-1,3) (0,4) (0,5) (0,6) (0,7) (1,4) ...
          */
         const unsigned height = 4;
         nir_def *block = nir_udiv_imm(b, linear, height);
         id_x = nir_umod(b, block, size_x);
         id_y = nir_umod(b,
                         nir_iadd(b,
                                  nir_umod_imm(b, linear, height),
                                  nir_imul_imm(b,
                                               nir_udiv(b, block, size_x),
                                               height)),
                         size_y);
      } else {
         /* Y-major lid order. Optimal for tileY accesses only,
          * which are usually images. X,Y ordering will look like:
          * (0,0) (0,1) (0,2) ... (0,size_y-1) (1,0) (1,1) ...
          */
         id_y = nir_umod(b, linear, size_y);
         id_x = nir_umod(b, nir_udiv(b, linear, size_y), size_x);
      }

      id_z = nir_udiv(b, linear, size_xy);
      state->local_id = nir_vec3(b, id_x, id_y, id_z);
      if (!state->local_index) {
         state->local_index = nir_iadd(b, nir_iadd(b, id_x,
                                                   nir_imul(b, id_y, size_x)),
                                       nir_imul(b, id_z, size_xy));
      }
      break;
   case DERIVATIVE_GROUP_LINEAR:
      /* For linear, just set the local invocation index linearly,
       * and calculate local invocation ID from that.
       */
      id_x = nir_umod(b, linear, size_x);
      id_y = nir_umod(b, nir_udiv(b, linear, size_x), size_y);
      id_z = nir_udiv(b, linear, size_xy);
      state->local_id = nir_vec3(b, id_x, id_y, id_z);
      state->local_index = linear;
      break;
   case DERIVATIVE_GROUP_QUADS: {
      /* For quads, first we figure out the 2x2 grid the invocation
       * belongs to -- treating extra Z layers as just more rows.
       * Then map that into local invocation ID (trivial) and local
       * invocation index.  Skipping Z simplifies the index calculation.
       */
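      /* For example, with size_x == 4 the first eight linear indices map to
       *    (x, y) = (0,0) (1,0) (0,1) (1,1) (2,0) (3,0) (2,1) (3,1)
       * i.e. each consecutive group of four invocations forms one 2x2 quad.
       */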

      nir_def *one = nir_imm_int(b, 1);
      nir_def *double_size_x = nir_ishl(b, size_x, one);

      /* ID within a pair of rows, where each group of 4 is a 2x2 quad. */
      nir_def *row_pair_id = nir_umod(b, linear, double_size_x);
      nir_def *y_row_pairs = nir_udiv(b, linear, double_size_x);

      nir_def *x =
         nir_ior(b,
                 nir_iand(b, row_pair_id, one),
                 nir_iand(b, nir_ishr(b, row_pair_id, one),
                          nir_imm_int(b, 0xfffffffe)));
      nir_def *y =
         nir_ior(b,
                 nir_ishl(b, y_row_pairs, one),
                 nir_iand(b, nir_ishr(b, row_pair_id, one), one));

      state->local_id = nir_vec3(b, x,
                                 nir_umod(b, y, size_y),
                                 nir_udiv(b, y, size_y));
      state->local_index = nir_iadd(b, x, nir_imul(b, y, size_x));
      break;
   }
   default:
      unreachable("invalid derivative group");
   }
}

static bool
lower_cs_intrinsics_convert_block(struct lower_intrinsics_state *state,
                                  nir_block *block)
{
   bool progress = false;
   nir_builder *b = &state->builder;
   nir_shader *nir = state->nir;

   /* Reset per-block definitions. */
   state->computed = false;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrinsic = nir_instr_as_intrinsic(instr);

      b->cursor = nir_after_instr(&intrinsic->instr);

      nir_def *sysval;
      switch (intrinsic->intrinsic) {
      case nir_intrinsic_load_local_invocation_id: {
         if (!state->computed)
            compute_local_index_id(state, intrinsic);

         if (!state->local_id) {
            /* Will be lowered later by the backend. */
            assert(state->hw_generated_local_id);
            continue;
         }

         sysval = state->local_id;
         break;
      }

      case nir_intrinsic_load_local_invocation_index: {
         if (!state->computed)
            compute_local_index_id(state, intrinsic);

         /* Will be lowered later by the backend. */
         if (&intrinsic->def == state->hw_index)
            continue;

         assert(state->local_index);
         sysval = state->local_index;
         break;
      }

      case nir_intrinsic_load_num_subgroups: {
         nir_def *size;
         if (state->nir->info.workgroup_size_variable) {
            nir_def *size_xyz = nir_load_workgroup_size(b);
            nir_def *size_x = nir_channel(b, size_xyz, 0);
            nir_def *size_y = nir_channel(b, size_xyz, 1);
            nir_def *size_z = nir_channel(b, size_xyz, 2);
            size = nir_imul(b, nir_imul(b, size_x, size_y), size_z);
         } else {
            size = nir_imm_int(b, nir->info.workgroup_size[0] *
                                  nir->info.workgroup_size[1] *
                                  nir->info.workgroup_size[2]);
         }

         /* Calculate the equivalent of DIV_ROUND_UP. */
         nir_def *simd_width = nir_load_simd_width_intel(b);
         sysval =
            nir_udiv(b, nir_iadd_imm(b, nir_iadd(b, size, simd_width), -1),
                        simd_width);
         break;
      }

      default:
         continue;
      }

      if (intrinsic->def.bit_size == 64)
         sysval = nir_u2u64(b, sysval);

      nir_def_replace(&intrinsic->def, sysval);

      state->progress = true;
      progress = true;
   }

   return progress;
}

static void
lower_cs_intrinsics_convert_impl(struct lower_intrinsics_state *state)
{
   state->builder = nir_builder_create(state->impl);

   nir_foreach_block(block, state->impl) {
      lower_cs_intrinsics_convert_block(state, block);
   }

   nir_metadata_preserve(state->impl, nir_metadata_control_flow);
}

bool
brw_nir_lower_cs_intrinsics(nir_shader *nir,
                            const struct intel_device_info *devinfo,
                            struct brw_cs_prog_data *prog_data)
{
   assert(gl_shader_stage_uses_workgroup(nir->info.stage));

   struct lower_intrinsics_state state = {
      .nir = nir,
      .hw_generated_local_id = false,
   };

   /* Constraints from NV_compute_shader_derivatives. */
   if (!nir->info.workgroup_size_variable) {
      if (nir->info.derivative_group == DERIVATIVE_GROUP_QUADS) {
         assert(nir->info.workgroup_size[0] % 2 == 0);
         assert(nir->info.workgroup_size[1] % 2 == 0);
      } else if (nir->info.derivative_group == DERIVATIVE_GROUP_LINEAR) {
         ASSERTED unsigned workgroup_size =
            nir->info.workgroup_size[0] *
            nir->info.workgroup_size[1] *
            nir->info.workgroup_size[2];
         assert(workgroup_size % 4 == 0);
      }
   }

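   /* On Gfx12.5+ the hardware can generate local invocation IDs itself.  Use
    * that path for compute shaders with a fixed workgroup size whose X and Y
    * dimensions are powers of two; quad derivative groups need the software
    * 2x2 layout instead.
    */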
   if (devinfo->verx10 >= 125 && prog_data &&
       nir->info.stage == MESA_SHADER_COMPUTE &&
       nir->info.derivative_group != DERIVATIVE_GROUP_QUADS &&
       !nir->info.workgroup_size_variable &&
       util_is_power_of_two_nonzero(nir->info.workgroup_size[0]) &&
       util_is_power_of_two_nonzero(nir->info.workgroup_size[1])) {

      state.hw_generated_local_id = true;

      /* TODO: more heuristics about 1D/SLM access vs. 2D access */
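      /* Pick X-major (XYZ) walk order when accesses are likely to be linear
       * (explicit index usage, 1D workgroups, no images or textures);
       * otherwise Y-major (YXZ) favors tiled image access.
       */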
      bool linear =
         nir->info.derivative_group == DERIVATIVE_GROUP_LINEAR ||
         BITSET_TEST(nir->info.system_values_read,
                     SYSTEM_VALUE_LOCAL_INVOCATION_INDEX) ||
         (nir->info.workgroup_size[1] == 1 &&
          nir->info.workgroup_size[2] == 1) ||
         (nir->info.num_images == 0 && nir->info.num_textures == 0);

      prog_data->walk_order =
         linear ? INTEL_WALK_ORDER_XYZ : INTEL_WALK_ORDER_YXZ;

      /* nir_lower_compute_system_values will replace any references to
       * SYSTEM_VALUE_LOCAL_INVOCATION_ID vector components with zero for
       * any dimension where the workgroup size is 1, so we can skip
       * generating those.  However, the hardware can only generate
       * X, XY, or XYZ - it can't skip earlier components.
       */
      prog_data->generate_local_id =
         (nir->info.workgroup_size[0] > 1 ? WRITEMASK_X   : 0) |
         (nir->info.workgroup_size[1] > 1 ? WRITEMASK_XY  : 0) |
         (nir->info.workgroup_size[2] > 1 ? WRITEMASK_XYZ : 0);
   }

   nir_foreach_function_impl(impl, nir) {
      state.impl = impl;
      lower_cs_intrinsics_convert_impl(&state);
   }

   return state.progress;
}