xref: /aosp_15_r20/external/mesa3d/src/intel/compiler/brw_nir.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "intel_nir.h"
25 #include "brw_nir.h"
26 #include "compiler/glsl_types.h"
27 #include "compiler/nir/nir_builder.h"
28 
29 /*
30  * Returns the minimum number of vec4 (as_vec4 == true) or dvec4 (as_vec4 ==
31  * false) elements needed to pack a type.
32  */
33 static int
34 type_size_xvec4(const struct glsl_type *type, bool as_vec4, bool bindless)
35 {
36    unsigned int i;
37    int size;
38 
39    switch (type->base_type) {
40    case GLSL_TYPE_UINT:
41    case GLSL_TYPE_INT:
42    case GLSL_TYPE_FLOAT:
43    case GLSL_TYPE_FLOAT16:
44    case GLSL_TYPE_BOOL:
45    case GLSL_TYPE_DOUBLE:
46    case GLSL_TYPE_UINT16:
47    case GLSL_TYPE_INT16:
48    case GLSL_TYPE_UINT8:
49    case GLSL_TYPE_INT8:
50    case GLSL_TYPE_UINT64:
51    case GLSL_TYPE_INT64:
52       if (glsl_type_is_matrix(type)) {
53          const glsl_type *col_type = glsl_get_column_type(type);
54          unsigned col_slots =
55             (as_vec4 && glsl_type_is_dual_slot(col_type)) ? 2 : 1;
56          return type->matrix_columns * col_slots;
57       } else {
58          /* Regardless of size of vector, it gets a vec4. This is bad
59           * packing for things like floats, but otherwise arrays become a
60           * mess.  Hopefully a later pass over the code can pack scalars
61           * down if appropriate.
62           */
63          return (as_vec4 && glsl_type_is_dual_slot(type)) ? 2 : 1;
64       }
65    case GLSL_TYPE_ARRAY:
66       assert(type->length > 0);
67       return type_size_xvec4(type->fields.array, as_vec4, bindless) *
68              type->length;
69    case GLSL_TYPE_STRUCT:
70    case GLSL_TYPE_INTERFACE:
71       size = 0;
72       for (i = 0; i < type->length; i++) {
73 	 size += type_size_xvec4(type->fields.structure[i].type, as_vec4,
74                                  bindless);
75       }
76       return size;
77    case GLSL_TYPE_SUBROUTINE:
78       return 1;
79 
80    case GLSL_TYPE_SAMPLER:
81    case GLSL_TYPE_TEXTURE:
82       /* Samplers and textures take up no register space, since they're baked
83        * in at link time.
84        */
85       return bindless ? 1 : 0;
86    case GLSL_TYPE_ATOMIC_UINT:
87       return 0;
88    case GLSL_TYPE_IMAGE:
89       return bindless ? 1 : 0;
90    case GLSL_TYPE_VOID:
91    case GLSL_TYPE_ERROR:
92    case GLSL_TYPE_COOPERATIVE_MATRIX:
93       unreachable("not reached");
94    }
95 
96    return 0;
97 }
98 
99 /**
100  * Returns the minimum number of vec4 elements needed to pack a type.
101  *
102  * For simple types, it will return 1 (a single vec4); for matrices, the
103  * number of columns; for array and struct, the sum of the vec4_size of
104  * each of its elements; and for sampler and atomic, zero.
105  *
106  * This method is useful to calculate how much register space is needed to
107  * store a particular type.
108  */
109 int
110 type_size_vec4(const struct glsl_type *type, bool bindless)
111 {
112    return type_size_xvec4(type, true, bindless);
113 }
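/* Rough illustration of the packing rules above (slot counts follow directly
 * from type_size_xvec4() with as_vec4 == true; a reading aid, not a spec):
 *
 *    type_size_vec4(float)   == 1   (every scalar/vector gets a full vec4)
 *    type_size_vec4(vec3)    == 1
 *    type_size_vec4(mat4)    == 4   (one slot per column)
 *    type_size_vec4(vec2[3]) == 3   (array: element size times length)
 *    type_size_vec4(dvec4)   == 2   (dual-slot double-precision vector)
 */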
114 
115 /**
116  * Returns the minimum number of dvec4 elements needed to pack a type.
117  *
118  * For simple types, it will return 1 (a single dvec4); for matrices, the
119  * number of columns; for array and struct, the sum of the dvec4_size of
120  * each of its elements; and for sampler and atomic, zero.
121  *
122  * This method is useful to calculate how much register space is needed to
123  * store a particular type.
124  *
125  * Measuring double-precision vertex inputs as dvec4 is required because
126  * ARB_vertex_attrib_64bit states that these use the same number of locations
127  * as the single-precision version. That is, two consecutive dvec4s would be
128  * located in location "x" and location "x+1", not "x+2".
129  *
130  * In order to map vec4/dvec4 vertex inputs to the proper ATTRs,
131  * remap_vs_attrs() takes into account both the location and whether the
132  * type fits in one or two vec4 slots.
133  */
134 int
135 type_size_dvec4(const struct glsl_type *type, bool bindless)
136 {
137    return type_size_xvec4(type, false, bindless);
138 }
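/* Sketch of why the two counts differ for 64-bit types: measured in vec4
 * units a double-precision vector may need two slots, while measured in
 * dvec4 units it always takes one slot per column, matching the
 * ARB_vertex_attrib_64bit location rules described above:
 *
 *    type_size_vec4(dvec4) == 2     type_size_dvec4(dvec4) == 1
 *    type_size_vec4(dmat4) == 8     type_size_dvec4(dmat4) == 4
 */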
139 
140 static bool
141 remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
142                   enum tess_primitive_mode _primitive_mode)
143 {
144    const int location = nir_intrinsic_base(intr);
145    const unsigned component = nir_intrinsic_component(intr);
146    bool out_of_bounds = false;
147    bool write = !nir_intrinsic_infos[intr->intrinsic].has_dest;
148    unsigned mask = write ? nir_intrinsic_write_mask(intr) : 0;
149    nir_def *src = NULL, *dest = NULL;
150 
151    if (write) {
152       assert(intr->num_components == intr->src[0].ssa->num_components);
153    } else {
154       assert(intr->num_components == intr->def.num_components);
155    }
156 
157    if (location == VARYING_SLOT_TESS_LEVEL_INNER) {
158       b->cursor = write ? nir_before_instr(&intr->instr)
159                         : nir_after_instr(&intr->instr);
160 
161       switch (_primitive_mode) {
162       case TESS_PRIMITIVE_QUADS:
163          /* gl_TessLevelInner[0..1] lives at DWords 3-2 (reversed). */
164          nir_intrinsic_set_base(intr, 0);
165 
166          if (write) {
167             assert(intr->src[0].ssa->num_components == 2);
168 
169             intr->num_components = 4;
170 
171             nir_def *undef = nir_undef(b, 1, 32);
172             nir_def *x = nir_channel(b, intr->src[0].ssa, 0);
173             nir_def *y = nir_channel(b, intr->src[0].ssa, 1);
174             src = nir_vec4(b, undef, undef, y, x);
175             mask = !!(mask & WRITEMASK_X) << 3 | !!(mask & WRITEMASK_Y) << 2;
176          } else if (intr->def.num_components > 1) {
177             assert(intr->def.num_components == 2);
178 
179             intr->num_components = 4;
180             intr->def.num_components = 4;
181 
182             unsigned wz[2] = { 3, 2 };
183             dest = nir_swizzle(b, &intr->def, wz, 2);
184          } else {
185             nir_intrinsic_set_component(intr, 3 - component);
186          }
187          break;
188       case TESS_PRIMITIVE_TRIANGLES:
189          /* gl_TessLevelInner[0] lives at DWord 4. */
190          nir_intrinsic_set_base(intr, 1);
191          mask &= WRITEMASK_X;
192          out_of_bounds = component > 0;
193          break;
194       case TESS_PRIMITIVE_ISOLINES:
195          out_of_bounds = true;
196          break;
197       default:
198          unreachable("Bogus tessellation domain");
199       }
200    } else if (location == VARYING_SLOT_TESS_LEVEL_OUTER) {
201       b->cursor = write ? nir_before_instr(&intr->instr)
202                         : nir_after_instr(&intr->instr);
203 
204       nir_intrinsic_set_base(intr, 1);
205 
206       switch (_primitive_mode) {
207       case TESS_PRIMITIVE_QUADS:
208       case TESS_PRIMITIVE_TRIANGLES:
209          /* Quads:     gl_TessLevelOuter[0..3] lives at DWords 7-4 (reversed).
210           * Triangles: gl_TessLevelOuter[0..2] lives at DWords 7-5 (reversed).
211           */
212          if (write) {
213             assert(intr->src[0].ssa->num_components == 4);
214 
215             unsigned wzyx[4] = { 3, 2, 1, 0 };
216             src = nir_swizzle(b, intr->src[0].ssa, wzyx, 4);
217             mask = !!(mask & WRITEMASK_X) << 3 | !!(mask & WRITEMASK_Y) << 2 |
218                    !!(mask & WRITEMASK_Z) << 1 | !!(mask & WRITEMASK_W) << 0;
219 
220             /* Don't overwrite the inner factor at DWord 4 for triangles */
221             if (_primitive_mode == TESS_PRIMITIVE_TRIANGLES)
222                mask &= ~WRITEMASK_X;
223          } else if (intr->def.num_components > 1) {
224             assert(intr->def.num_components == 4);
225 
226             unsigned wzyx[4] = { 3, 2, 1, 0 };
227             dest = nir_swizzle(b, &intr->def, wzyx, 4);
228          } else {
229             nir_intrinsic_set_component(intr, 3 - component);
230             out_of_bounds = component == 3 &&
231                             _primitive_mode == TESS_PRIMITIVE_TRIANGLES;
232          }
233          break;
234       case TESS_PRIMITIVE_ISOLINES:
235          /* gl_TessLevelOuter[0..1] lives at DWords 6-7 (in order). */
236          if (write) {
237             assert(intr->src[0].ssa->num_components == 4);
238 
239             nir_def *undef = nir_undef(b, 1, 32);
240             nir_def *x = nir_channel(b, intr->src[0].ssa, 0);
241             nir_def *y = nir_channel(b, intr->src[0].ssa, 1);
242             src = nir_vec4(b, undef, undef, x, y);
243             mask = !!(mask & WRITEMASK_X) << 2 | !!(mask & WRITEMASK_Y) << 3;
244          } else {
245             nir_intrinsic_set_component(intr, 2 + component);
246             out_of_bounds = component > 1;
247          }
248          break;
249       default:
250          unreachable("Bogus tessellation domain");
251       }
252    } else {
253       return false;
254    }
255 
256    if (out_of_bounds) {
257       if (!write)
258          nir_def_rewrite_uses(&intr->def, nir_undef(b, 1, 32));
259       nir_instr_remove(&intr->instr);
260    } else if (write) {
261       nir_intrinsic_set_write_mask(intr, mask);
262 
263       if (src) {
264          nir_src_rewrite(&intr->src[0], src);
265       }
266    } else if (dest) {
267       nir_def_rewrite_uses_after(&intr->def, dest,
268                                      dest->parent_instr);
269    }
270 
271    return true;
272 }
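/* Summary of the tess factor layout implied by the remapping above, with
 * vec4 slot 0 covering DWords 0-3 and slot 1 covering DWords 4-7 of the
 * patch URB entry (taken from the comments in remap_tess_levels(); a
 * reading aid only):
 *
 *    Quads:     inner[0..1] -> DWords 3-2 (reversed),  outer[0..3] -> 7-4 (reversed)
 *    Triangles: inner[0]    -> DWord 4,                outer[0..2] -> 7-5 (reversed)
 *    Isolines:  (no inner factors),                    outer[0..1] -> 6-7 (in order)
 */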
273 
274 static bool
275 is_input(nir_intrinsic_instr *intrin)
276 {
277    return intrin->intrinsic == nir_intrinsic_load_input ||
278           intrin->intrinsic == nir_intrinsic_load_per_primitive_input ||
279           intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
280           intrin->intrinsic == nir_intrinsic_load_interpolated_input;
281 }
282 
283 static bool
284 is_output(nir_intrinsic_instr *intrin)
285 {
286    return intrin->intrinsic == nir_intrinsic_load_output ||
287           intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
288           intrin->intrinsic == nir_intrinsic_store_output ||
289           intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
290 }
291 
292 
293 static bool
294 remap_patch_urb_offsets(nir_block *block, nir_builder *b,
295                         const struct intel_vue_map *vue_map,
296                         enum tess_primitive_mode tes_primitive_mode)
297 {
298    nir_foreach_instr_safe(instr, block) {
299       if (instr->type != nir_instr_type_intrinsic)
300          continue;
301 
302       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
303 
304       gl_shader_stage stage = b->shader->info.stage;
305 
306       if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
307           (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
308 
309          if (remap_tess_levels(b, intrin, tes_primitive_mode))
310             continue;
311 
312          int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
313          assert(vue_slot != -1);
314          intrin->const_index[0] = vue_slot;
315 
316          nir_src *vertex = nir_get_io_arrayed_index_src(intrin);
317          if (vertex) {
318             if (nir_src_is_const(*vertex)) {
319                intrin->const_index[0] += nir_src_as_uint(*vertex) *
320                                          vue_map->num_per_vertex_slots;
321             } else {
322                b->cursor = nir_before_instr(&intrin->instr);
323 
324                /* Multiply by the number of per-vertex slots. */
325                nir_def *vertex_offset =
326                   nir_imul(b,
327                            vertex->ssa,
328                            nir_imm_int(b,
329                                        vue_map->num_per_vertex_slots));
330 
331                /* Add it to the existing offset */
332                nir_src *offset = nir_get_io_offset_src(intrin);
333                nir_def *total_offset =
334                   nir_iadd(b, vertex_offset,
335                            offset->ssa);
336 
337                nir_src_rewrite(offset, total_offset);
338             }
339          }
340       }
341    }
342    return true;
343 }
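/* Informally, for an arrayed TCS/TES access the code above computes
 *
 *    slot = vue_map->varying_to_slot[location] +
 *           vertex_index * vue_map->num_per_vertex_slots
 *
 * folding the vertex term into const_index[0] when the vertex index is
 * constant, and emitting an imul/iadd on the offset source otherwise.  This
 * is just a restatement of the logic above, not additional behaviour.
 */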
344 
345 void
346 brw_nir_lower_vs_inputs(nir_shader *nir)
347 {
348    /* Start with the location of the variable's base. */
349    nir_foreach_shader_in_variable(var, nir)
350       var->data.driver_location = var->data.location;
351 
352    /* Now use nir_lower_io to walk dereference chains.  Attribute arrays are
353     * loaded as one vec4 or dvec4 per element (or matrix column), depending on
354     * whether it is a double-precision type or not.
355     */
356    nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
357                 nir_lower_io_lower_64bit_to_32);
358 
359    /* This pass needs actual constants */
360    nir_opt_constant_folding(nir);
361 
362    nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
363 
364    /* The last step is to remap VERT_ATTRIB_* to actual registers */
365 
366    /* Whether or not we have any system generated values.  gl_DrawID is not
367     * included here as it lives in its own vec4.
368     */
369    const bool has_sgvs =
370       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FIRST_VERTEX) ||
371       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE) ||
372       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) ||
373       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID);
374 
375    const unsigned num_inputs = util_bitcount64(nir->info.inputs_read);
376 
377    nir_foreach_function_impl(impl, nir) {
378       nir_builder b = nir_builder_create(impl);
379 
380       nir_foreach_block(block, impl) {
381          nir_foreach_instr_safe(instr, block) {
382             if (instr->type != nir_instr_type_intrinsic)
383                continue;
384 
385             nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
386 
387             switch (intrin->intrinsic) {
388             case nir_intrinsic_load_first_vertex:
389             case nir_intrinsic_load_base_instance:
390             case nir_intrinsic_load_vertex_id_zero_base:
391             case nir_intrinsic_load_instance_id:
392             case nir_intrinsic_load_is_indexed_draw:
393             case nir_intrinsic_load_draw_id: {
394                b.cursor = nir_after_instr(&intrin->instr);
395 
396                /* gl_VertexID and friends are stored by the VF as the last
397                 * vertex element.  We convert them to load_input intrinsics at
398                 * the right location.
399                 */
400                nir_intrinsic_instr *load =
401                   nir_intrinsic_instr_create(nir, nir_intrinsic_load_input);
402                load->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
403 
404                nir_intrinsic_set_base(load, num_inputs);
405                switch (intrin->intrinsic) {
406                case nir_intrinsic_load_first_vertex:
407                   nir_intrinsic_set_component(load, 0);
408                   break;
409                case nir_intrinsic_load_base_instance:
410                   nir_intrinsic_set_component(load, 1);
411                   break;
412                case nir_intrinsic_load_vertex_id_zero_base:
413                   nir_intrinsic_set_component(load, 2);
414                   break;
415                case nir_intrinsic_load_instance_id:
416                   nir_intrinsic_set_component(load, 3);
417                   break;
418                case nir_intrinsic_load_draw_id:
419                case nir_intrinsic_load_is_indexed_draw:
420                   /* gl_DrawID and IsIndexedDraw are stored right after
421                    * gl_VertexID and friends if any of them exist.
422                    */
423                   nir_intrinsic_set_base(load, num_inputs + has_sgvs);
424                   if (intrin->intrinsic == nir_intrinsic_load_draw_id)
425                      nir_intrinsic_set_component(load, 0);
426                   else
427                      nir_intrinsic_set_component(load, 1);
428                   break;
429                default:
430                   unreachable("Invalid system value intrinsic");
431                }
432 
433                load->num_components = 1;
434                nir_def_init(&load->instr, &load->def, 1, 32);
435                nir_builder_instr_insert(&b, &load->instr);
436 
437                nir_def_replace(&intrin->def, &load->def);
438                break;
439             }
440 
441             case nir_intrinsic_load_input: {
442                /* Attributes come in a contiguous block, ordered by their
443                 * gl_vert_attrib value.  That means we can compute the slot
444                 * number for an attribute by masking out the enabled attributes
445                 * before it and counting the bits.
446                 */
447                int attr = nir_intrinsic_base(intrin);
448                int slot = util_bitcount64(nir->info.inputs_read &
449                                           BITFIELD64_MASK(attr));
450                nir_intrinsic_set_base(intrin, slot);
451                break;
452             }
453 
454             default:
455                break; /* Nothing to do */
456             }
457          }
458       }
459    }
460 }
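/* Layout of the system-generated values produced above, appended after the
 * last user attribute (component assignments are taken directly from the
 * switch above; shown here only as a summary):
 *
 *    slot num_inputs:              .x = load_first_vertex
 *                                  .y = load_base_instance
 *                                  .z = load_vertex_id_zero_base
 *                                  .w = load_instance_id
 *    slot num_inputs + has_sgvs:   .x = load_draw_id
 *                                  .y = load_is_indexed_draw
 */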
461 
462 void
463 brw_nir_lower_vue_inputs(nir_shader *nir,
464                          const struct intel_vue_map *vue_map)
465 {
466    nir_foreach_shader_in_variable(var, nir)
467       var->data.driver_location = var->data.location;
468 
469    /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
470    nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
471                 nir_lower_io_lower_64bit_to_32);
472 
473    /* This pass needs actual constants */
474    nir_opt_constant_folding(nir);
475 
476    nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
477 
478    nir_foreach_function_impl(impl, nir) {
479       nir_foreach_block(block, impl) {
480          nir_foreach_instr(instr, block) {
481             if (instr->type != nir_instr_type_intrinsic)
482                continue;
483 
484             nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
485 
486             if (intrin->intrinsic == nir_intrinsic_load_input ||
487                 intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
488                /* Offset 0 is the VUE header, which contains
489                 * VARYING_SLOT_LAYER [.y], VARYING_SLOT_VIEWPORT [.z], and
490                 * VARYING_SLOT_PSIZ [.w].
491                 */
492                int varying = nir_intrinsic_base(intrin);
493                int vue_slot;
494                switch (varying) {
495                case VARYING_SLOT_PSIZ:
496                   nir_intrinsic_set_base(intrin, 0);
497                   nir_intrinsic_set_component(intrin, 3);
498                   break;
499 
500                default:
501                   vue_slot = vue_map->varying_to_slot[varying];
502                   assert(vue_slot != -1);
503                   nir_intrinsic_set_base(intrin, vue_slot);
504                   break;
505                }
506             }
507          }
508       }
509    }
510 }
511 
512 void
513 brw_nir_lower_tes_inputs(nir_shader *nir, const struct intel_vue_map *vue_map)
514 {
515    nir_foreach_shader_in_variable(var, nir)
516       var->data.driver_location = var->data.location;
517 
518    nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
519                 nir_lower_io_lower_64bit_to_32);
520 
521    /* This pass needs actual constants */
522    nir_opt_constant_folding(nir);
523 
524    nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
525 
526    nir_foreach_function_impl(impl, nir) {
527       nir_builder b = nir_builder_create(impl);
528       nir_foreach_block(block, impl) {
529          remap_patch_urb_offsets(block, &b, vue_map,
530                                  nir->info.tess._primitive_mode);
531       }
532    }
533 }
534 
535 static bool
536 lower_barycentric_per_sample(nir_builder *b,
537                              nir_intrinsic_instr *intrin,
538                              UNUSED void *cb_data)
539 {
540    if (intrin->intrinsic != nir_intrinsic_load_barycentric_pixel &&
541        intrin->intrinsic != nir_intrinsic_load_barycentric_centroid)
542       return false;
543 
544    b->cursor = nir_before_instr(&intrin->instr);
545    nir_def *centroid =
546       nir_load_barycentric(b, nir_intrinsic_load_barycentric_sample,
547                            nir_intrinsic_interp_mode(intrin));
548    nir_def_replace(&intrin->def, centroid);
549    return true;
550 }
551 
552 /**
553  * Convert interpolateAtOffset() offsets from [-0.5, +0.5] floating point
554  * offsets to integer [-8, +7] offsets (in units of 1/16th of a pixel).
555  *
556  * We clamp to +7/16 on the upper end of the range, since +0.5 isn't
557  * representable in a S0.4 value; a naive conversion would give us -8/16,
558  * which is the opposite of what was intended.
559  *
560  * This is allowed by GL_ARB_gpu_shader5's quantization rules:
561  *
562  *    "Not all values of <offset> may be supported; x and y offsets may
563  *     be rounded to fixed-point values with the number of fraction bits
564  *     given by the implementation-dependent constant
565  *     FRAGMENT_INTERPOLATION_OFFSET_BITS."
566  */
567 static bool
568 lower_barycentric_at_offset(nir_builder *b, nir_intrinsic_instr *intrin,
569                             void *data)
570 {
571    if (intrin->intrinsic != nir_intrinsic_load_barycentric_at_offset)
572       return false;
573 
574    b->cursor = nir_before_instr(&intrin->instr);
575 
576    assert(intrin->src[0].ssa);
577    nir_def *offset =
578       nir_imin(b, nir_imm_int(b, 7),
579                nir_f2i32(b, nir_fmul_imm(b, intrin->src[0].ssa, 16)));
580 
581    nir_src_rewrite(&intrin->src[0], offset);
582 
583    return true;
584 }
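/* Worked example of the conversion above (fixed point is 1/16th of a pixel,
 * clamped to +7 as described in the comment before the function):
 *
 *    offset  0.25  ->  0.25 * 16 =  4.0  ->  4             (= +4/16)
 *    offset  0.5   ->  0.5  * 16 =  8.0  ->  clamped to 7  (= +7/16)
 *    offset -0.5   -> -0.5  * 16 = -8.0  -> -8             (= -8/16)
 */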
585 
586 void
587 brw_nir_lower_fs_inputs(nir_shader *nir,
588                         const struct intel_device_info *devinfo,
589                         const struct brw_wm_prog_key *key)
590 {
591    nir_foreach_shader_in_variable(var, nir) {
592       var->data.driver_location = var->data.location;
593 
594       /* Apply default interpolation mode.
595        *
596        * Everything defaults to smooth except for the legacy GL color
597        * built-in variables, which might be flat depending on API state.
598        */
599       if (var->data.interpolation == INTERP_MODE_NONE) {
600          const bool flat = key->flat_shade &&
601             (var->data.location == VARYING_SLOT_COL0 ||
602              var->data.location == VARYING_SLOT_COL1);
603 
604          var->data.interpolation = flat ? INTERP_MODE_FLAT
605                                         : INTERP_MODE_SMOOTH;
606       }
607    }
608 
609    nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
610                 nir_lower_io_lower_64bit_to_32);
611    if (devinfo->ver >= 11)
612       nir_lower_interpolation(nir, ~0);
613 
614    if (key->multisample_fbo == BRW_NEVER) {
615       nir_lower_single_sampled(nir);
616    } else if (key->persample_interp == BRW_ALWAYS) {
617       nir_shader_intrinsics_pass(nir, lower_barycentric_per_sample,
618                                    nir_metadata_control_flow,
619                                    NULL);
620    }
621 
622    if (devinfo->ver < 20)
623       nir_shader_intrinsics_pass(nir, lower_barycentric_at_offset,
624                                  nir_metadata_control_flow,
625                                  NULL);
626 
627    /* This pass needs actual constants */
628    nir_opt_constant_folding(nir);
629 
630    nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
631 }
632 
633 void
634 brw_nir_lower_vue_outputs(nir_shader *nir)
635 {
636    nir_foreach_shader_out_variable(var, nir) {
637       var->data.driver_location = var->data.location;
638    }
639 
640    nir_lower_io(nir, nir_var_shader_out, type_size_vec4,
641                 nir_lower_io_lower_64bit_to_32);
642 }
643 
644 void
645 brw_nir_lower_tcs_outputs(nir_shader *nir, const struct intel_vue_map *vue_map,
646                           enum tess_primitive_mode tes_primitive_mode)
647 {
648    nir_foreach_shader_out_variable(var, nir) {
649       var->data.driver_location = var->data.location;
650    }
651 
652    nir_lower_io(nir, nir_var_shader_out, type_size_vec4,
653                 nir_lower_io_lower_64bit_to_32);
654 
655    /* This pass needs actual constants */
656    nir_opt_constant_folding(nir);
657 
658    nir_io_add_const_offset_to_base(nir, nir_var_shader_out);
659 
660    nir_foreach_function_impl(impl, nir) {
661       nir_builder b = nir_builder_create(impl);
662       nir_foreach_block(block, impl) {
663          remap_patch_urb_offsets(block, &b, vue_map, tes_primitive_mode);
664       }
665    }
666 }
667 
668 void
669 brw_nir_lower_fs_outputs(nir_shader *nir)
670 {
671    nir_foreach_shader_out_variable(var, nir) {
672       var->data.driver_location =
673          SET_FIELD(var->data.index, BRW_NIR_FRAG_OUTPUT_INDEX) |
674          SET_FIELD(var->data.location, BRW_NIR_FRAG_OUTPUT_LOCATION);
675    }
676 
677    nir_lower_io(nir, nir_var_shader_out, type_size_dvec4, 0);
678 }
679 
680 static bool
681 tag_speculative_access(nir_builder *b,
682                        nir_intrinsic_instr *intrin,
683                        void *unused)
684 {
685    if (intrin->intrinsic == nir_intrinsic_load_ubo &&
686        brw_nir_ubo_surface_index_is_pushable(intrin->src[0])) {
687       nir_intrinsic_set_access(intrin, ACCESS_CAN_SPECULATE |
688                                nir_intrinsic_access(intrin));
689       return true;
690    }
691 
692    return false;
693 }
694 
695 static bool
696 brw_nir_tag_speculative_access(nir_shader *nir)
697 {
698    return nir_shader_intrinsics_pass(nir, tag_speculative_access,
699                                      nir_metadata_all, NULL);
700 }
701 
702 #define OPT(pass, ...) ({                                  \
703    bool this_progress = false;                             \
704    NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
705    if (this_progress)                                      \
706       progress = true;                                     \
707    this_progress;                                          \
708 })
709 
710 #define LOOP_OPT(pass, ...) ({                             \
711    const unsigned long this_line = __LINE__;               \
712    bool this_progress = false;                             \
713    if (opt_line == this_line)                              \
714       break;                                               \
715    NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
716    if (this_progress) {                                    \
717       progress = true;                                     \
718       opt_line = this_line;                                \
719    }                                                       \
720    this_progress;                                          \
721 })
722 
723 #define LOOP_OPT_NOT_IDEMPOTENT(pass, ...) ({              \
724    bool this_progress = false;                             \
725    NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
726    if (this_progress) {                                    \
727       progress = true;                                     \
728       opt_line = 0;                                        \
729    }                                                       \
730    this_progress;                                          \
731 })
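/* Roughly how these macros interact: opt_line records the __LINE__ of the
 * last LOOP_OPT call site that made progress.  When the do/while loop below
 * comes back around to that same call site with nothing else having made
 * progress in between, a full cycle has been completed without change, so
 * the `break` exits the loop early rather than running another redundant
 * iteration.  LOOP_OPT_NOT_IDEMPOTENT clears opt_line because passes such as
 * nir_opt_algebraic can re-enable progress in passes that already ran, so
 * the early-exit marker cannot be trusted after them.
 */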
732 
733 void
734 brw_nir_optimize(nir_shader *nir,
735                  const struct intel_device_info *devinfo)
736 {
737    bool progress;
738    unsigned lower_flrp =
739       (nir->options->lower_flrp16 ? 16 : 0) |
740       (nir->options->lower_flrp32 ? 32 : 0) |
741       (nir->options->lower_flrp64 ? 64 : 0);
742 
743    unsigned long opt_line = 0;
744    do {
745       progress = false;
746       /* This pass is causing problems with types used by OpenCL:
747        *    https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/13955
748        *
749        * Running with it disabled made no difference in the resulting assembly
750        * code.
751        */
752       if (nir->info.stage != MESA_SHADER_KERNEL)
753          LOOP_OPT(nir_split_array_vars, nir_var_function_temp);
754       LOOP_OPT(nir_shrink_vec_array_vars, nir_var_function_temp);
755       LOOP_OPT(nir_opt_deref);
756       if (LOOP_OPT(nir_opt_memcpy))
757          LOOP_OPT(nir_split_var_copies);
758       LOOP_OPT(nir_lower_vars_to_ssa);
759       if (!nir->info.var_copies_lowered) {
760          /* Only run this pass if nir_lower_var_copies was not called
761           * yet. That would lower away any copy_deref instructions and we
762           * don't want to introduce any more.
763           */
764          LOOP_OPT(nir_opt_find_array_copies);
765       }
766       LOOP_OPT(nir_opt_copy_prop_vars);
767       LOOP_OPT(nir_opt_dead_write_vars);
768       LOOP_OPT(nir_opt_combine_stores, nir_var_all);
769 
770       LOOP_OPT(nir_opt_ray_queries);
771       LOOP_OPT(nir_opt_ray_query_ranges);
772 
773       LOOP_OPT(nir_lower_alu_to_scalar, NULL, NULL);
774 
775       LOOP_OPT(nir_copy_prop);
776 
777       LOOP_OPT(nir_lower_phis_to_scalar, false);
778 
779       LOOP_OPT(nir_copy_prop);
780       LOOP_OPT(nir_opt_dce);
781       LOOP_OPT(nir_opt_cse);
782       LOOP_OPT(nir_opt_combine_stores, nir_var_all);
783 
784       /* Passing 0 to the peephole select pass causes it to convert
785        * if-statements that contain only move instructions in the branches
786        * regardless of the count.
787        *
788        * Passing 1 to the peephole select pass causes it to convert
789        * if-statements that contain at most a single ALU instruction (total)
790        * in both branches.  Before Gfx6, some math instructions were
791        * prohibitively expensive and the results of compare operations need an
792        * extra resolve step.  For these reasons, this pass is more harmful
793        * than good on those platforms.
794        *
795        * For indirect loads of uniforms (push constants), we assume that array
796        * indices will nearly always be in bounds and the cost of the load is
797        * low.  Therefore there shouldn't be a performance benefit to avoid it.
798        */
799       LOOP_OPT(nir_opt_peephole_select, 0, true, false);
800       LOOP_OPT(nir_opt_peephole_select, 8, true, true);
801 
802       LOOP_OPT(nir_opt_intrinsics);
803       LOOP_OPT(nir_opt_idiv_const, 32);
804       LOOP_OPT_NOT_IDEMPOTENT(nir_opt_algebraic);
805 
806       LOOP_OPT(nir_opt_generate_bfi);
807       LOOP_OPT(nir_opt_reassociate_bfi);
808 
809       LOOP_OPT(nir_lower_constant_convert_alu_types);
810       LOOP_OPT(nir_opt_constant_folding);
811 
812       if (lower_flrp != 0) {
813          if (LOOP_OPT(nir_lower_flrp,
814                  lower_flrp,
815                  false /* always_precise */)) {
816             LOOP_OPT(nir_opt_constant_folding);
817          }
818 
819          /* Nothing should rematerialize any flrps, so we only need to do this
820           * lowering once.
821           */
822          lower_flrp = 0;
823       }
824 
825       LOOP_OPT(nir_opt_dead_cf);
826       if (LOOP_OPT(nir_opt_loop)) {
827          /* If nir_opt_loop makes progress, then we need to clean
828           * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
829           * to make progress.
830           */
831          LOOP_OPT(nir_copy_prop);
832          LOOP_OPT(nir_opt_dce);
833       }
834       LOOP_OPT_NOT_IDEMPOTENT(nir_opt_if, nir_opt_if_optimize_phi_true_false);
835       LOOP_OPT(nir_opt_conditional_discard);
836       if (nir->options->max_unroll_iterations != 0) {
837          LOOP_OPT_NOT_IDEMPOTENT(nir_opt_loop_unroll);
838       }
839       LOOP_OPT(nir_opt_remove_phis);
840       LOOP_OPT(nir_opt_gcm, false);
841       LOOP_OPT(nir_opt_undef);
842       LOOP_OPT(nir_lower_pack);
843    } while (progress);
844 
845    /* Work around a Gfxbench unused local sampler variable which will trigger
846     * an assert in the opt_large_constants pass.
847     */
848    OPT(nir_remove_dead_variables, nir_var_function_temp, NULL);
849 }
850 
851 static unsigned
852 lower_bit_size_callback(const nir_instr *instr, UNUSED void *data)
853 {
854    switch (instr->type) {
855    case nir_instr_type_alu: {
856       nir_alu_instr *alu = nir_instr_as_alu(instr);
857       switch (alu->op) {
858       case nir_op_bit_count:
859       case nir_op_ufind_msb:
860       case nir_op_ifind_msb:
861       case nir_op_find_lsb:
862          /* These are handled specially because the destination is always
863           * 32-bit and so the bit size of the instruction is given by the
864           * source.
865           */
866          return alu->src[0].src.ssa->bit_size >= 32 ? 0 : 32;
867       default:
868          break;
869       }
870 
871       if (alu->def.bit_size >= 32)
872          return 0;
873 
874       /* Note: nir_op_iabs and nir_op_ineg are not lowered here because the
875        * 8-bit ABS or NEG instruction should eventually get copy propagated
876        * into the MOV that does the type conversion.  This results in far
877        * fewer MOV instructions.
878        */
879       switch (alu->op) {
880       case nir_op_idiv:
881       case nir_op_imod:
882       case nir_op_irem:
883       case nir_op_udiv:
884       case nir_op_umod:
885       case nir_op_fceil:
886       case nir_op_ffloor:
887       case nir_op_ffract:
888       case nir_op_fround_even:
889       case nir_op_ftrunc:
890          return 32;
891       case nir_op_frcp:
892       case nir_op_frsq:
893       case nir_op_fsqrt:
894       case nir_op_fpow:
895       case nir_op_fexp2:
896       case nir_op_flog2:
897       case nir_op_fsin:
898       case nir_op_fcos:
899          return 0;
900       case nir_op_isign:
901          assert(!"Should have been lowered by nir_opt_algebraic.");
902          return 0;
903       default:
904          if (nir_op_infos[alu->op].num_inputs >= 2 &&
905              alu->def.bit_size == 8)
906             return 16;
907 
908          if (nir_alu_instr_is_comparison(alu) &&
909              alu->src[0].src.ssa->bit_size == 8)
910             return 16;
911 
912          return 0;
913       }
914       break;
915    }
916 
917    case nir_instr_type_intrinsic: {
918       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
919       switch (intrin->intrinsic) {
920       case nir_intrinsic_read_invocation:
921       case nir_intrinsic_read_first_invocation:
922       case nir_intrinsic_vote_feq:
923       case nir_intrinsic_vote_ieq:
924       case nir_intrinsic_shuffle:
925       case nir_intrinsic_shuffle_xor:
926       case nir_intrinsic_shuffle_up:
927       case nir_intrinsic_shuffle_down:
928       case nir_intrinsic_quad_broadcast:
929       case nir_intrinsic_quad_swap_horizontal:
930       case nir_intrinsic_quad_swap_vertical:
931       case nir_intrinsic_quad_swap_diagonal:
932          if (intrin->src[0].ssa->bit_size == 8)
933             return 16;
934          return 0;
935 
936       case nir_intrinsic_reduce:
937       case nir_intrinsic_inclusive_scan:
938       case nir_intrinsic_exclusive_scan:
939          /* There are a couple of register region issues that make things
940           * complicated for 8-bit types:
941           *
942           *    1. Only raw moves are allowed to write to a packed 8-bit
943           *       destination.
944           *    2. If we use a strided destination, the efficient way to do
945           *       scan operations ends up using strides that are too big to
946           *       encode in an instruction.
947           *
948           * To get around these issues, we just do all 8-bit scan operations
949           * in 16 bits.  It's actually fewer instructions than what we'd have
950           * to do if we were trying to do it in native 8-bit types and the
951           * results are the same once we truncate to 8 bits at the end.
952           */
953          if (intrin->def.bit_size == 8)
954             return 16;
955          return 0;
956 
957       default:
958          return 0;
959       }
960       break;
961    }
962 
963    case nir_instr_type_phi: {
964       nir_phi_instr *phi = nir_instr_as_phi(instr);
965       if (phi->def.bit_size == 8)
966          return 16;
967       return 0;
968    }
969 
970    default:
971       return 0;
972    }
973 }
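/* A few examples of what the callback above returns (0 means "leave the
 * instruction alone"); these follow directly from the cases above:
 *
 *    8-bit  iadd (two sources)            -> 16  (done in 16-bit, truncated back)
 *    8-bit  ineg (single source)          -> 0   (copy-propagated, see comment above)
 *    16-bit udiv                          -> 32  (integer division done in 32-bit)
 *    8-bit  comparison (e.g. ieq)         -> 16
 *    32-bit anything                      -> 0
 */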
974 
975 /* On gfx12.5+, if the offsets are not both constant and in the [-8, 7] range,
976  * we will have nir_lower_tex() lower the source offset by returning true from
977  * this filter function.
978  */
979 static bool
980 lower_xehp_tg4_offset_filter(const nir_instr *instr, UNUSED const void *data)
981 {
982    if (instr->type != nir_instr_type_tex)
983       return false;
984 
985    nir_tex_instr *tex = nir_instr_as_tex(instr);
986 
987    if (tex->op != nir_texop_tg4)
988       return false;
989 
990    int offset_index = nir_tex_instr_src_index(tex, nir_tex_src_offset);
991    if (offset_index < 0)
992       return false;
993 
994    if (!nir_src_is_const(tex->src[offset_index].src))
995       return true;
996 
997    int64_t offset_x = nir_src_comp_as_int(tex->src[offset_index].src, 0);
998    int64_t offset_y = nir_src_comp_as_int(tex->src[offset_index].src, 1);
999 
1000    return offset_x < -8 || offset_x > 7 || offset_y < -8 || offset_y > 7;
1001 }
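/* Examples for the filter above (returning true means nir_lower_tex() will
 * lower the tg4 offset source):
 *
 *    constant offset ( 4, -3)  -> in range, not lowered  (returns false)
 *    constant offset ( 9,  0)  -> out of range, lowered  (returns true)
 *    non-constant offset       -> lowered                (returns true)
 */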
1002 
1003 /* Does some simple lowering and runs the standard suite of optimizations
1004  *
1005  * This is intended to be called more-or-less directly after you get the
1006  * shader out of GLSL or some other source.  While it is geared towards i965,
1007  * it is not at all generator-specific.
1008  */
1009 void
1010 brw_preprocess_nir(const struct brw_compiler *compiler, nir_shader *nir,
1011                    const struct brw_nir_compiler_opts *opts)
1012 {
1013    const struct intel_device_info *devinfo = compiler->devinfo;
1014    UNUSED bool progress; /* Written by OPT */
1015 
1016    nir_validate_ssa_dominance(nir, "before brw_preprocess_nir");
1017 
1018    OPT(nir_lower_frexp);
1019 
1020    OPT(nir_lower_alu_to_scalar, NULL, NULL);
1021 
1022    struct nir_opt_16bit_tex_image_options options = {
1023       .rounding_mode = nir_rounding_mode_undef,
1024       .opt_tex_dest_types = nir_type_float | nir_type_int | nir_type_uint,
1025    };
1026    OPT(nir_opt_16bit_tex_image, &options);
1027 
1028    if (nir->info.stage == MESA_SHADER_GEOMETRY)
1029       OPT(nir_lower_gs_intrinsics, 0);
1030 
1031    /* See also brw_nir_trig_workarounds.py */
1032    if (compiler->precise_trig &&
1033        !(devinfo->ver >= 10 || devinfo->platform == INTEL_PLATFORM_KBL))
1034       OPT(brw_nir_apply_trig_workarounds);
1035 
1036    /* This workaround exists for performance reasons. Since it requires not
1037     * setting RENDER_SURFACE_STATE::SurfaceArray when the array length is 1,
1038     * we lose the HW robustness feature in that case.
1039     *
1040     * So when robust image access is enabled, just avoid the workaround.
1041     */
1042    if (intel_needs_workaround(devinfo, 1806565034) && !opts->robust_image_access)
1043       OPT(intel_nir_clamp_image_1d_2d_array_sizes);
1044 
1045    const struct intel_nir_lower_texture_opts intel_tex_options = {
1046       .combined_lod_or_bias_and_offset = compiler->devinfo->ver >= 20,
1047    };
1048    OPT(intel_nir_lower_texture, &intel_tex_options);
1049 
1050    const nir_lower_tex_options tex_options = {
1051       .lower_txp = ~0,
1052       .lower_txf_offset = true,
1053       .lower_rect_offset = true,
1054       .lower_txd_cube_map = true,
1055       /* For below, see bspec 45942, "Enable new message layout for cube array" */
1056       .lower_txd_3d = devinfo->verx10 >= 125,
1057       .lower_txd_array = devinfo->verx10 >= 125,
1058       .lower_txb_shadow_clamp = true,
1059       .lower_txd_shadow_clamp = true,
1060       .lower_txd_offset_clamp = true,
1061       .lower_tg4_offsets = true,
1062       .lower_txs_lod = true, /* Wa_14012320009 */
1063       .lower_offset_filter =
1064          devinfo->verx10 >= 125 ? lower_xehp_tg4_offset_filter : NULL,
1065       .lower_invalid_implicit_lod = true,
1066    };
1067 
1068    /* In the case where TG4 coords are lowered to offsets and we have a
1069     * lower_xehp_tg4_offset_filter lowering those offsets further, we need to
1070     * rerun the pass because the instructions inserted by the first lowering
1071     * are not visible during that first pass.
1072     */
1073    if (OPT(nir_lower_tex, &tex_options)) {
1074       OPT(intel_nir_lower_texture, &intel_tex_options);
1075       OPT(nir_lower_tex, &tex_options);
1076    }
1077 
1078    OPT(nir_normalize_cubemap_coords);
1079 
1080    OPT(nir_lower_global_vars_to_local);
1081 
1082    OPT(nir_split_var_copies);
1083    OPT(nir_split_struct_vars, nir_var_function_temp);
1084 
1085    brw_nir_optimize(nir, devinfo);
1086 
1087    OPT(nir_lower_doubles, opts->softfp64, nir->options->lower_doubles_options);
1088    if (OPT(nir_lower_int64_float_conversions)) {
1089       OPT(nir_opt_algebraic);
1090       OPT(nir_lower_doubles, opts->softfp64,
1091           nir->options->lower_doubles_options);
1092    }
1093 
1094    OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
1095 
1096    /* Lower a bunch of stuff */
1097    OPT(nir_lower_var_copies);
1098 
1099    /* This needs to be run after the first optimization pass but before we
1100     * lower indirect derefs away
1101     */
1102    OPT(nir_opt_large_constants, NULL, 32);
1103 
1104    OPT(nir_lower_load_const_to_scalar);
1105 
1106    OPT(nir_lower_system_values);
1107    nir_lower_compute_system_values_options lower_csv_options = {
1108       .has_base_workgroup_id = nir->info.stage == MESA_SHADER_COMPUTE,
1109    };
1110    OPT(nir_lower_compute_system_values, &lower_csv_options);
1111 
1112    const nir_lower_subgroups_options subgroups_options = {
1113       .ballot_bit_size = 32,
1114       .ballot_components = 1,
1115       .lower_to_scalar = true,
1116       .lower_relative_shuffle = true,
1117       .lower_quad_broadcast_dynamic = true,
1118       .lower_elect = true,
1119       .lower_inverse_ballot = true,
1120       .lower_rotate_to_shuffle = true,
1121    };
1122    OPT(nir_lower_subgroups, &subgroups_options);
1123 
1124    nir_variable_mode indirect_mask =
1125       brw_nir_no_indirect_mask(compiler, nir->info.stage);
1126    OPT(nir_lower_indirect_derefs, indirect_mask, UINT32_MAX);
1127 
1128    /* Even in cases where we can handle indirect temporaries via scratch,
1129     * it can still be expensive.  Lower indirects on small arrays to
1130     * conditional load/stores.
1131     *
1132     * The threshold of 16 was chosen semi-arbitrarily.  The idea is that an
1133     * indirect on an array of 16 elements is about 30 instructions at which
1134     * point, you may be better off doing a send.  With a SIMD8 program, 16
1135     * floats is 1/8 of the entire register file.  Any array larger than that
1136     * is likely to cause pressure issues.  Also, this value is sufficiently
1137     * high that the benchmarks known to suffer from large temporary array
1138     * issues are helped but nothing else in shader-db is hurt except for maybe
1139     * that one kerbal space program shader.
1140     */
1141    if (!(indirect_mask & nir_var_function_temp))
1142       OPT(nir_lower_indirect_derefs, nir_var_function_temp, 16);
1143 
1144    /* Lower array derefs of vectors for SSBO and UBO loads.  For both UBOs and
1145     * SSBOs, our back-end is capable of loading an entire vec4 at a time and
1146     * we would like to take advantage of that whenever possible regardless of
1147     * whether or not the app gives us full loads.  This should allow the
1148     * optimizer to combine UBO and SSBO load operations and save us some send
1149     * messages.
1150     */
1151    OPT(nir_lower_array_deref_of_vec,
1152        nir_var_mem_ubo | nir_var_mem_ssbo, NULL,
1153        nir_lower_direct_array_deref_of_vec_load);
1154 
1155    /* Clamp load_per_vertex_input of the TCS stage so that we do not generate
1156     * loads reading out of bounds. We can do this here because we called
1157     * nir_lower_system_values above.
1158     */
1159    if (nir->info.stage == MESA_SHADER_TESS_CTRL &&
1160        compiler->use_tcs_multi_patch)
1161       OPT(intel_nir_clamp_per_vertex_loads);
1162 
1163    /* Get rid of split copies */
1164    brw_nir_optimize(nir, devinfo);
1165 }
1166 
1167 static bool
1168 brw_nir_zero_inputs_instr(struct nir_builder *b, nir_intrinsic_instr *intrin,
1169                           void *data)
1170 {
1171    if (intrin->intrinsic != nir_intrinsic_load_deref)
1172       return false;
1173 
1174    nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1175    if (!nir_deref_mode_is(deref, nir_var_shader_in))
1176       return false;
1177 
1178    if (deref->deref_type != nir_deref_type_var)
1179       return false;
1180 
1181    nir_variable *var = deref->var;
1182 
1183    uint64_t zero_inputs = *(uint64_t *)data;
1184    if (!(BITFIELD64_BIT(var->data.location) & zero_inputs))
1185       return false;
1186 
1187    b->cursor = nir_before_instr(&intrin->instr);
1188 
1189    nir_def *zero = nir_imm_zero(b, 1, 32);
1190 
1191    nir_def_replace(&intrin->def, zero);
1192 
1193    return true;
1194 }
1195 
1196 static bool
1197 brw_nir_zero_inputs(nir_shader *shader, uint64_t *zero_inputs)
1198 {
1199    return nir_shader_intrinsics_pass(shader, brw_nir_zero_inputs_instr,
1200                                      nir_metadata_control_flow,
1201                                      zero_inputs);
1202 }
1203 
1204 /* Code for Wa_18019110168 may have created input/output variables beyond
1205  * VARYING_SLOT_MAX and removed uses of variables below VARYING_SLOT_MAX.
1206  * Clean it up, so they all stay below VARYING_SLOT_MAX.
1207  */
1208 static void
1209 brw_mesh_compact_io(nir_shader *mesh, nir_shader *frag)
1210 {
1211    gl_varying_slot mapping[VARYING_SLOT_MAX] = {0, };
1212    gl_varying_slot cur = VARYING_SLOT_VAR0;
1213    bool compact = false;
1214 
1215    nir_foreach_shader_out_variable(var, mesh) {
1216       gl_varying_slot location = var->data.location;
1217       if (location < VARYING_SLOT_VAR0)
1218          continue;
1219       assert(location < ARRAY_SIZE(mapping));
1220 
1221       const struct glsl_type *type = var->type;
1222       if (nir_is_arrayed_io(var, MESA_SHADER_MESH) || var->data.per_view) {
1223          assert(glsl_type_is_array(type));
1224          type = glsl_get_array_element(type);
1225       }
1226 
1227       if (mapping[location])
1228          continue;
1229 
1230       unsigned num_slots = glsl_count_attribute_slots(type, false);
1231 
1232       compact |= location + num_slots > VARYING_SLOT_MAX;
1233 
1234       mapping[location] = cur;
1235       cur += num_slots;
1236    }
1237 
1238    if (!compact)
1239       return;
1240 
1241    /* The rest of this function should be hit only for Wa_18019110168. */
1242 
1243    nir_foreach_shader_out_variable(var, mesh) {
1244       gl_varying_slot location = var->data.location;
1245       if (location < VARYING_SLOT_VAR0)
1246          continue;
1247       location = mapping[location];
1248       if (location == 0)
1249          continue;
1250       var->data.location = location;
1251    }
1252 
1253    nir_foreach_shader_in_variable(var, frag) {
1254       gl_varying_slot location = var->data.location;
1255       if (location < VARYING_SLOT_VAR0)
1256          continue;
1257       location = mapping[location];
1258       if (location == 0)
1259          continue;
1260       var->data.location = location;
1261    }
1262 
1263    nir_shader_gather_info(mesh, nir_shader_get_entrypoint(mesh));
1264    nir_shader_gather_info(frag, nir_shader_get_entrypoint(frag));
1265 
1266    if (should_print_nir(mesh)) {
1267       printf("%s\n", __func__);
1268       nir_print_shader(mesh, stdout);
1269    }
1270    if (should_print_nir(frag)) {
1271       printf("%s\n", __func__);
1272       nir_print_shader(frag, stdout);
1273    }
1274 }
1275 
1276 void
1277 brw_nir_link_shaders(const struct brw_compiler *compiler,
1278                      nir_shader *producer, nir_shader *consumer)
1279 {
1280    const struct intel_device_info *devinfo = compiler->devinfo;
1281 
1282    if (producer->info.stage == MESA_SHADER_MESH &&
1283        consumer->info.stage == MESA_SHADER_FRAGMENT) {
1284       uint64_t fs_inputs = 0, ms_outputs = 0;
1285       /* gl_MeshPerPrimitiveEXT[].gl_ViewportIndex, gl_PrimitiveID and gl_Layer
1286        * are per primitive, but the fragment shader does not have them marked as
1287        * such. Add the annotation here.
1288        */
1289       nir_foreach_shader_in_variable(var, consumer) {
1290          fs_inputs |= BITFIELD64_BIT(var->data.location);
1291 
1292          switch (var->data.location) {
1293             case VARYING_SLOT_LAYER:
1294             case VARYING_SLOT_PRIMITIVE_ID:
1295             case VARYING_SLOT_VIEWPORT:
1296                var->data.per_primitive = 1;
1297                break;
1298             default:
1299                continue;
1300          }
1301       }
1302 
1303       nir_foreach_shader_out_variable(var, producer)
1304          ms_outputs |= BITFIELD64_BIT(var->data.location);
1305 
1306       uint64_t zero_inputs = ~ms_outputs & fs_inputs;
1307       zero_inputs &= BITFIELD64_BIT(VARYING_SLOT_LAYER) |
1308                      BITFIELD64_BIT(VARYING_SLOT_VIEWPORT);
1309 
1310       if (zero_inputs)
1311          NIR_PASS(_, consumer, brw_nir_zero_inputs, &zero_inputs);
1312    }
1313 
1314    nir_lower_io_arrays_to_elements(producer, consumer);
1315    nir_validate_shader(producer, "after nir_lower_io_arrays_to_elements");
1316    nir_validate_shader(consumer, "after nir_lower_io_arrays_to_elements");
1317 
1318    NIR_PASS(_, producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
1319    NIR_PASS(_, consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
1320    brw_nir_optimize(producer, devinfo);
1321    brw_nir_optimize(consumer, devinfo);
1322 
1323    if (nir_link_opt_varyings(producer, consumer))
1324       brw_nir_optimize(consumer, devinfo);
1325 
1326    NIR_PASS(_, producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
1327    NIR_PASS(_, consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);
1328 
1329    if (nir_remove_unused_varyings(producer, consumer)) {
1330       if (should_print_nir(producer)) {
1331          printf("nir_remove_unused_varyings\n");
1332          nir_print_shader(producer, stdout);
1333       }
1334       if (should_print_nir(consumer)) {
1335          printf("nir_remove_unused_varyings\n");
1336          nir_print_shader(consumer, stdout);
1337       }
1338 
1339       NIR_PASS(_, producer, nir_lower_global_vars_to_local);
1340       NIR_PASS(_, consumer, nir_lower_global_vars_to_local);
1341 
1342       /* The backend might not be able to handle indirects on
1343        * temporaries so we need to lower indirects on any of the
1344        * varyings we have demoted here.
1345        */
1346       NIR_PASS(_, producer, nir_lower_indirect_derefs,
1347                   brw_nir_no_indirect_mask(compiler, producer->info.stage),
1348                   UINT32_MAX);
1349       NIR_PASS(_, consumer, nir_lower_indirect_derefs,
1350                   brw_nir_no_indirect_mask(compiler, consumer->info.stage),
1351                   UINT32_MAX);
1352 
1353       brw_nir_optimize(producer, devinfo);
1354       brw_nir_optimize(consumer, devinfo);
1355 
1356       if (producer->info.stage == MESA_SHADER_MESH &&
1357             consumer->info.stage == MESA_SHADER_FRAGMENT) {
1358          brw_mesh_compact_io(producer, consumer);
1359       }
1360    }
1361 
1362    NIR_PASS(_, producer, nir_lower_io_to_vector, nir_var_shader_out);
1363 
1364    if (producer->info.stage == MESA_SHADER_TESS_CTRL &&
1365        producer->options->vectorize_tess_levels)
1366       NIR_PASS_V(producer, nir_vectorize_tess_levels);
1367 
1368    NIR_PASS(_, producer, nir_opt_combine_stores, nir_var_shader_out);
1369    NIR_PASS(_, consumer, nir_lower_io_to_vector, nir_var_shader_in);
1370 
1371    if (producer->info.stage != MESA_SHADER_TESS_CTRL &&
1372        producer->info.stage != MESA_SHADER_MESH &&
1373        producer->info.stage != MESA_SHADER_TASK) {
1374       /* Calling lower_io_to_vector creates output variable writes with
1375        * write-masks.  On non-TCS outputs, the back-end can't handle it and we
1376        * need to call nir_lower_io_to_temporaries to get rid of them.  This,
1377        * in turn, creates temporary variables and extra copy_deref intrinsics
1378        * that we need to clean up.
1379        *
1380        * Note that Mesh/Task don't support I/O as temporaries (I/O is shared
1381        * between the whole workgroup, possibly using multiple HW threads). For
1382        * those, the write-mask on outputs is handled by the I/O lowering.
1383        */
1384       NIR_PASS_V(producer, nir_lower_io_to_temporaries,
1385                  nir_shader_get_entrypoint(producer), true, false);
1386       NIR_PASS(_, producer, nir_lower_global_vars_to_local);
1387       NIR_PASS(_, producer, nir_split_var_copies);
1388       NIR_PASS(_, producer, nir_lower_var_copies);
1389    }
1390 
1391    if (producer->info.stage == MESA_SHADER_TASK &&
1392          consumer->info.stage == MESA_SHADER_MESH) {
1393 
1394       for (unsigned i = 0; i < 3; ++i)
1395          assert(producer->info.mesh.ts_mesh_dispatch_dimensions[i] <= UINT16_MAX);
1396 
1397       nir_lower_compute_system_values_options options = {
1398             .lower_workgroup_id_to_index = true,
1399             .num_workgroups[0] = producer->info.mesh.ts_mesh_dispatch_dimensions[0],
1400             .num_workgroups[1] = producer->info.mesh.ts_mesh_dispatch_dimensions[1],
1401             .num_workgroups[2] = producer->info.mesh.ts_mesh_dispatch_dimensions[2],
1402             /* nir_lower_idiv generates expensive code */
1403             .shortcut_1d_workgroup_id = compiler->devinfo->verx10 >= 125,
1404       };
1405 
1406       NIR_PASS(_, consumer, nir_lower_compute_system_values, &options);
1407    }
1408 }
1409 
1410 bool
1411 brw_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
1412                              unsigned bit_size,
1413                              unsigned num_components,
1414                              nir_intrinsic_instr *low,
1415                              nir_intrinsic_instr *high,
1416                              void *data)
1417 {
1418    /* Don't combine things to generate 64-bit loads/stores.  We have to split
1419     * those back into 32-bit ones anyway and UBO loads aren't split in NIR so
1420     * we don't want to make a mess for the back-end.
1421     */
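   /* e.g. two adjacent 32-bit UBO loads should not be fused into a single
    * 64-bit load here; the back-end would only have to split it again.
    */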
1422    if (bit_size > 32)
1423       return false;
1424 
1425    if (low->intrinsic == nir_intrinsic_load_ubo_uniform_block_intel ||
1426        low->intrinsic == nir_intrinsic_load_ssbo_uniform_block_intel ||
1427        low->intrinsic == nir_intrinsic_load_shared_uniform_block_intel ||
1428        low->intrinsic == nir_intrinsic_load_global_constant_uniform_block_intel) {
1429       if (num_components > 4) {
1430          if (!util_is_power_of_two_nonzero(num_components))
1431             return false;
1432 
1433          if (bit_size != 32)
1434             return false;
1435 
1436          if (num_components > 32)
1437             return false;
1438       }
1439    } else {
1440       /* We can handle at most a vec4 right now.  Anything bigger would get
1441        * immediately split by brw_nir_lower_mem_access_bit_sizes anyway.
1442        */
1443       if (num_components > 4)
1444          return false;
1445    }
1446 
1447 
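   /* Derive the combined alignment, e.g. align_mul == 16 with
    * align_offset == 4 gives align == 4 (the lowest set bit of the offset),
    * align_offset == 6 gives align == 2, and align_offset == 0 means the
    * access is aligned to align_mul itself.
    */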
1448    uint32_t align;
1449    if (align_offset)
1450       align = 1 << (ffs(align_offset) - 1);
1451    else
1452       align = align_mul;
1453 
1454    if (align < bit_size / 8)
1455       return false;
1456 
1457    return true;
1458 }
1459 
1460 static
1461 bool combine_all_memory_barriers(nir_intrinsic_instr *a,
1462                                  nir_intrinsic_instr *b,
1463                                  void *data)
1464 {
1465    /* Combine control barriers with identical memory semantics. This prevents
1466     * the second barrier from generating a spurious fence message identical to
1467     * the one emitted for the first barrier.
1468     */
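   /* e.g. two back-to-back barriers with the same memory modes, semantics,
    * and scope collapse into one whose execution scope is the wider of the
    * two.
    */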
1469    if (nir_intrinsic_memory_modes(a) == nir_intrinsic_memory_modes(b) &&
1470        nir_intrinsic_memory_semantics(a) == nir_intrinsic_memory_semantics(b) &&
1471        nir_intrinsic_memory_scope(a) == nir_intrinsic_memory_scope(b)) {
1472       nir_intrinsic_set_execution_scope(a, MAX2(nir_intrinsic_execution_scope(a),
1473                                                 nir_intrinsic_execution_scope(b)));
1474       return true;
1475    }
1476 
1477    /* Only combine pure memory barriers */
1478    if ((nir_intrinsic_execution_scope(a) != SCOPE_NONE) ||
1479        (nir_intrinsic_execution_scope(b) != SCOPE_NONE))
1480       return false;
1481 
1482    /* Translation to backend IR will get rid of modes we don't care about, so
1483     * no harm in always combining them.
1484     *
1485     * TODO: While HW has only ACQUIRE|RELEASE fences, we could improve the
1486     * scheduling so that it can take advantage of the different semantics.
1487     */
1488    nir_intrinsic_set_memory_modes(a, nir_intrinsic_memory_modes(a) |
1489                                      nir_intrinsic_memory_modes(b));
1490    nir_intrinsic_set_memory_semantics(a, nir_intrinsic_memory_semantics(a) |
1491                                          nir_intrinsic_memory_semantics(b));
1492    nir_intrinsic_set_memory_scope(a, MAX2(nir_intrinsic_memory_scope(a),
1493                                           nir_intrinsic_memory_scope(b)));
1494    return true;
1495 }
1496 
1497 static nir_mem_access_size_align
1498 get_mem_access_size_align(nir_intrinsic_op intrin, uint8_t bytes,
1499                           uint8_t bit_size, uint32_t align_mul, uint32_t align_offset,
1500                           bool offset_is_const, const void *cb_data)
1501 {
1502    const uint32_t align = nir_combined_align(align_mul, align_offset);
1503 
1504    switch (intrin) {
1505    case nir_intrinsic_load_ssbo:
1506    case nir_intrinsic_load_shared:
1507    case nir_intrinsic_load_scratch:
1508       /* When the offset is constant we can use a 32-bit load even if the
1509        * access is under-aligned, and just shift the data around as needed.
1510        */
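      /* e.g. align_mul == 4, align_offset == 1, bytes == 6: pad == 1 and
       * comps32 == DIV_ROUND_UP(7, 4) == 2, so a 2-component 32-bit load is
       * emitted and the wanted bytes are shifted out of it.
       */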
1511       if (align < 4 && offset_is_const) {
1512          assert(util_is_power_of_two_nonzero(align_mul) && align_mul >= 4);
1513          const unsigned pad = align_offset % 4;
1514          const unsigned comps32 = MIN2(DIV_ROUND_UP(bytes + pad, 4), 4);
1515          return (nir_mem_access_size_align) {
1516             .bit_size = 32,
1517             .num_components = comps32,
1518             .align = 4,
1519          };
1520       }
1521       break;
1522 
1523    case nir_intrinsic_load_task_payload:
1524       if (bytes < 4 || align < 4) {
1525          return (nir_mem_access_size_align) {
1526             .bit_size = 32,
1527             .num_components = 1,
1528             .align = 4,
1529          };
1530       }
1531       break;
1532 
1533    default:
1534       break;
1535    }
1536 
1537    const bool is_load = nir_intrinsic_infos[intrin].has_dest;
1538    const bool is_scratch = intrin == nir_intrinsic_load_scratch ||
1539                            intrin == nir_intrinsic_store_scratch;
1540 
1541    if (align < 4 || bytes < 4) {
1542       /* Choose a byte, word, or dword */
1543       bytes = MIN2(bytes, 4);
1544       if (bytes == 3)
1545          bytes = is_load ? 4 : 2;
1546 
1547       if (is_scratch) {
1548          /* Scratch address swizzling in the back-end happens at DWORD
1549           * granularity, so a single load or store can't cross a DWORD
1550           * boundary.
1551           */
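         /* e.g. with align_mul == 4 and align_offset == 3, a 2-byte access
          * would straddle a DWORD boundary, so it is shrunk to a single byte
          * (bytes = 4 - 3 = 1) and the rest is left for a follow-up access
          * emitted by the lowering pass.
          */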
1552          if ((align_offset % 4) + bytes > MIN2(align_mul, 4))
1553             bytes = MIN2(align_mul, 4) - (align_offset % 4);
1554 
1555          /* Must be a power of two */
1556          if (bytes == 3)
1557             bytes = 2;
1558       }
1559 
1560       return (nir_mem_access_size_align) {
1561          .bit_size = bytes * 8,
1562          .num_components = 1,
1563          .align = 1,
1564       };
1565    } else {
1566       bytes = MIN2(bytes, 16);
1567       return (nir_mem_access_size_align) {
1568          .bit_size = 32,
1569          .num_components = is_scratch ? 1 :
1570                            is_load ? DIV_ROUND_UP(bytes, 4) : bytes / 4,
1571          .align = 4,
1572       };
1573    }
1574 }
1575 
1576 static void
1577 brw_vectorize_lower_mem_access(nir_shader *nir,
1578                                const struct brw_compiler *compiler,
1579                                enum brw_robustness_flags robust_flags)
1580 {
1581    bool progress = false;
1582 
1583    nir_load_store_vectorize_options options = {
1584       .modes = nir_var_mem_ubo | nir_var_mem_ssbo |
1585                nir_var_mem_global | nir_var_mem_shared |
1586                nir_var_mem_task_payload,
1587       .callback = brw_nir_should_vectorize_mem,
1588       .robust_modes = (nir_variable_mode)0,
1589    };
1590 
1591    if (robust_flags & BRW_ROBUSTNESS_UBO)
1592       options.robust_modes |= nir_var_mem_ubo | nir_var_mem_global;
1593    if (robust_flags & BRW_ROBUSTNESS_SSBO)
1594       options.robust_modes |= nir_var_mem_ssbo | nir_var_mem_global;
1595 
1596    OPT(nir_opt_load_store_vectorize, &options);
1597 
1598    /* Required for nir_divergence_analysis() */
1599    OPT(nir_convert_to_lcssa, true, true);
1600 
1601    /* When the HW supports block loads, use the divergence analysis to find
1602     * uniform SSBO loads and turn them into block loads.
1603     *
1604     * Rerun the vectorizer after that to build the largest possible block
1605     * loads.
1606     *
1607     * This is a win on two fronts:
1608     *   - fewer send messages
1609     *   - reduced register pressure
1610     */
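   /* e.g. a load_ssbo whose address is uniform across the subgroup can become
    * a load_ssbo_uniform_block_intel, and the second vectorizer run can then
    * grow such block loads up to the 32-dword limit allowed by
    * brw_nir_should_vectorize_mem().
    */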
1611    nir_divergence_analysis(nir);
1612    if (OPT(intel_nir_blockify_uniform_loads, compiler->devinfo))
1613       OPT(nir_opt_load_store_vectorize, &options);
1614    OPT(nir_opt_remove_phis);
1615 
1616    nir_lower_mem_access_bit_sizes_options mem_access_options = {
1617       .modes = nir_var_mem_ssbo |
1618                nir_var_mem_constant |
1619                nir_var_mem_task_payload |
1620                nir_var_shader_temp |
1621                nir_var_function_temp |
1622                nir_var_mem_global |
1623                nir_var_mem_shared,
1624       .callback = get_mem_access_size_align,
1625    };
1626    OPT(nir_lower_mem_access_bit_sizes, &mem_access_options);
1627 
1628    while (progress) {
1629       progress = false;
1630 
1631       OPT(nir_lower_pack);
1632       OPT(nir_copy_prop);
1633       OPT(nir_opt_dce);
1634       OPT(nir_opt_cse);
1635       OPT(nir_opt_algebraic);
1636       OPT(nir_opt_constant_folding);
1637    }
1638 }
1639 
1640 static bool
1641 nir_shader_has_local_variables(const nir_shader *nir)
1642 {
1643    nir_foreach_function_impl(impl, nir) {
1644       if (!exec_list_is_empty(&impl->locals))
1645          return true;
1646    }
1647 
1648    return false;
1649 }
1650 
1651 /* Prepare the given shader for codegen
1652  *
1653  * This function is intended to be called right before going into the actual
1654  * backend and is highly backend-specific.  Also, once this function has been
1655  * called on a shader, it will no longer be in SSA form so most optimizations
1656  * will not work.
1657  */
1658 void
1659 brw_postprocess_nir(nir_shader *nir, const struct brw_compiler *compiler,
1660                     bool debug_enabled,
1661                     enum brw_robustness_flags robust_flags)
1662 {
1663    const struct intel_device_info *devinfo = compiler->devinfo;
1664 
1665    UNUSED bool progress; /* Written by OPT */
1666 
1667    OPT(intel_nir_lower_sparse_intrinsics);
1668 
1669    OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
1670 
1671    OPT(nir_opt_combine_barriers, combine_all_memory_barriers, NULL);
1672 
1673    OPT(intel_nir_lower_printf);
1674 
1675    do {
1676       progress = false;
1677       OPT(nir_opt_algebraic_before_ffma);
1678    } while (progress);
1679 
1680    if (devinfo->verx10 >= 125) {
1681       /* Lower integer division by constants before nir_lower_idiv. */
1682       OPT(nir_opt_idiv_const, 32);
1683       const nir_lower_idiv_options options = {
1684          .allow_fp16 = false
1685       };
1686       OPT(nir_lower_idiv, &options);
1687    }
1688 
1689    if (gl_shader_stage_can_set_fragment_shading_rate(nir->info.stage))
1690       NIR_PASS(_, nir, intel_nir_lower_shading_rate_output);
1691 
1692    OPT(brw_nir_tag_speculative_access);
1693 
1694    brw_nir_optimize(nir, devinfo);
1695 
1696    if (nir_shader_has_local_variables(nir)) {
1697       OPT(nir_lower_vars_to_explicit_types, nir_var_function_temp,
1698           glsl_get_natural_size_align_bytes);
1699       OPT(nir_lower_explicit_io, nir_var_function_temp,
1700           nir_address_format_32bit_offset);
1701       brw_nir_optimize(nir, devinfo);
1702    }
1703 
1704    brw_vectorize_lower_mem_access(nir, compiler, robust_flags);
1705 
1706    /* Potentially perform this optimization pass twice because it can create
1707     * additional opportunities for itself.
1708     */
1709    if (OPT(nir_opt_algebraic_before_lower_int64))
1710       OPT(nir_opt_algebraic_before_lower_int64);
1711 
1712    if (OPT(nir_lower_int64))
1713       brw_nir_optimize(nir, devinfo);
1714 
1715    /* Try to fuse multiply-adds; if successful, run shrink_vectors so that
1716     * peephole_ffma doesn't generate things like this:
1717     *    vec16 ssa_0 = ...
1718     *    vec16 ssa_1 = fneg ssa_0
1719     *    vec1  ssa_2 = ffma ssa_1, ...
1720     *
1721     * We want this instead:
1722     *    vec16 ssa_0 = ...
1723     *    vec1  ssa_1 = fneg ssa_0.x
1724     *    vec1  ssa_2 = ffma ssa_1, ...
1725     */
1726    if (OPT(intel_nir_opt_peephole_ffma))
1727       OPT(nir_opt_shrink_vectors, false);
1728 
1729    OPT(intel_nir_opt_peephole_imul32x16);
1730 
1731    if (OPT(nir_opt_comparison_pre)) {
1732       OPT(nir_copy_prop);
1733       OPT(nir_opt_dce);
1734       OPT(nir_opt_cse);
1735 
1736       /* Do the select peephole again.  nir_opt_comparison_pre (combined with
1737        * the other optimization passes) will have removed at least one
1738        * instruction from one of the branches of the if-statement, so now it
1739        * might be under the threshold of conversion to bcsel.
1740        */
1741       OPT(nir_opt_peephole_select, 0, false, false);
1742       OPT(nir_opt_peephole_select, 1, false, true);
1743    }
1744 
1745    do {
1746       progress = false;
1747 
1748       OPT(brw_nir_opt_fsat);
1749       OPT(nir_opt_algebraic_late);
1750       OPT(brw_nir_lower_fsign);
1751 
1752       if (progress) {
1753          OPT(nir_opt_constant_folding);
1754          OPT(nir_copy_prop);
1755          OPT(nir_opt_dce);
1756          OPT(nir_opt_cse);
1757       }
1758    } while (progress);
1759 
1760 
1761    if (OPT(nir_lower_fp16_casts, nir_lower_fp16_split_fp64)) {
1762       if (OPT(nir_lower_int64)) {
1763          brw_nir_optimize(nir, devinfo);
1764       }
1765    }
1766 
1767    OPT(nir_lower_alu_to_scalar, NULL, NULL);
1768 
1769    while (OPT(nir_opt_algebraic_distribute_src_mods)) {
1770       OPT(nir_opt_constant_folding);
1771       OPT(nir_copy_prop);
1772       OPT(nir_opt_dce);
1773       OPT(nir_opt_cse);
1774    }
1775 
1776    OPT(nir_copy_prop);
1777    OPT(nir_opt_dce);
1778    OPT(nir_opt_move, nir_move_comparisons);
1779    OPT(nir_opt_dead_cf);
1780 
1781    bool divergence_analysis_dirty = false;
1782    NIR_PASS(_, nir, nir_convert_to_lcssa, true, true);
1783    NIR_PASS_V(nir, nir_divergence_analysis);
1784 
1785    static const nir_lower_subgroups_options subgroups_options = {
1786       .ballot_bit_size = 32,
1787       .ballot_components = 1,
1788       .lower_elect = true,
1789       .lower_subgroup_masks = true,
1790    };
1791 
1792    if (OPT(nir_opt_uniform_atomics, false)) {
1793       OPT(nir_lower_subgroups, &subgroups_options);
1794 
1795       OPT(nir_opt_algebraic_before_lower_int64);
1796 
1797       if (OPT(nir_lower_int64))
1798          brw_nir_optimize(nir, devinfo);
1799 
1800       divergence_analysis_dirty = true;
1801    }
1802 
1803    /* nir_opt_uniform_subgroup can create some operations (e.g.,
1804     * load_subgroup_lt_mask) that need to be lowered again.
1805     */
1806    if (OPT(nir_opt_uniform_subgroup, &subgroups_options)) {
1807       /* Some of the optimizations can generate 64-bit integer multiplication
1808        * that must be lowered.
1809        */
1810       OPT(nir_lower_int64);
1811 
1812       /* Even if nir_lower_int64 did not make progress, re-run the main
1813        * optimization loop. nir_opt_uniform_subgroup may have caused some things
1814        * that previously appeared divergent to be marked as convergent. This
1815        * allows the elimination of some loops over, say, a TXF instruction
1816        * with a non-uniform texture handle.
1817        */
1818       brw_nir_optimize(nir, devinfo);
1819 
1820       OPT(nir_lower_subgroups, &subgroups_options);
1821    }
1822 
1823    /* Run intel_nir_lower_conversions only after the last time
1824     * brw_nir_optimize is called. Various optimizations invoked there can
1825     * rematerialize the conversions that the lowering pass eliminates.
1826     */
1827    OPT(intel_nir_lower_conversions);
1828 
1829    /* Do this only after the last opt_gcm. GCM will undo this lowering. */
1830    if (nir->info.stage == MESA_SHADER_FRAGMENT) {
1831       if (divergence_analysis_dirty) {
1832          NIR_PASS(_, nir, nir_convert_to_lcssa, true, true);
1833          NIR_PASS_V(nir, nir_divergence_analysis);
1834       }
1835 
1836       OPT(intel_nir_lower_non_uniform_barycentric_at_sample);
1837    }
1838 
1839    /* Clean up LCSSA phis */
1840    OPT(nir_opt_remove_phis);
1841 
1842    OPT(nir_lower_bool_to_int32);
1843    OPT(nir_copy_prop);
1844    OPT(nir_opt_dce);
1845 
1846    OPT(nir_lower_locals_to_regs, 32);
1847 
1848    if (unlikely(debug_enabled)) {
1849       /* Re-index SSA defs so we print more sensible numbers. */
1850       nir_foreach_function_impl(impl, nir) {
1851          nir_index_ssa_defs(impl);
1852       }
1853 
1854       fprintf(stderr, "NIR (SSA form) for %s shader:\n",
1855               _mesa_shader_stage_to_string(nir->info.stage));
1856       nir_print_shader(nir, stderr);
1857    }
1858 
1859    nir_validate_ssa_dominance(nir, "before nir_convert_from_ssa");
1860 
1861    /* Rerun the divergence analysis before convert_from_ssa, as that pass
1862     * asserts on consistent divergence flags.
1863     */
1864    NIR_PASS(_, nir, nir_convert_to_lcssa, true, true);
1865    NIR_PASS_V(nir, nir_divergence_analysis);
1866 
1867    OPT(nir_convert_from_ssa, true);
1868 
1869    OPT(nir_opt_dce);
1870 
1871    if (OPT(nir_opt_rematerialize_compares))
1872       OPT(nir_opt_dce);
1873 
1874    /* The mesh stages require this pass to be called at the last minute,
1875     * but if it does anything it will also constant fold, and that would
1876     * undo the work done by nir_trivialize_registers, so call it right
1877     * before that pass instead.
1878     */
1879    if (nir->info.stage == MESA_SHADER_MESH ||
1880        nir->info.stage == MESA_SHADER_TASK)
1881       brw_nir_adjust_payload(nir);
1882 
1883    nir_trivialize_registers(nir);
1884 
1885    nir_sweep(nir);
1886 
1887    if (unlikely(debug_enabled)) {
1888       fprintf(stderr, "NIR (final form) for %s shader:\n",
1889               _mesa_shader_stage_to_string(nir->info.stage));
1890       nir_print_shader(nir, stderr);
1891    }
1892 }
1893 
1894 static unsigned
1895 get_subgroup_size(const struct shader_info *info, unsigned max_subgroup_size)
1896 {
1897    switch (info->subgroup_size) {
1898    case SUBGROUP_SIZE_API_CONSTANT:
1899       /* We have to use the global constant size. */
1900       return BRW_SUBGROUP_SIZE;
1901 
1902    case SUBGROUP_SIZE_UNIFORM:
1903       /* It has to be uniform across all invocations but can vary per stage
1904        * if we want.  This gives us a bit more freedom.
1905        *
1906        * For compute, brw_nir_apply_key is called per-dispatch-width so this
1907        * is the actual subgroup size and not a maximum.  However, we only
1908        * invoke one size of any given compute shader so it's still guaranteed
1909        * to be uniform across invocations.
1910        */
1911       return max_subgroup_size;
1912 
1913    case SUBGROUP_SIZE_VARYING:
1914       /* The subgroup size is allowed to be fully varying.  For geometry
1915        * stages, we know it's always 8 which is max_subgroup_size so we can
1916        * return that.  For compute, brw_nir_apply_key is called once per
1917        * dispatch-width so max_subgroup_size is the real subgroup size.
1918        *
1919        * For fragment, we return 0 and let it fall through to the back-end
1920        * compiler.  This means we can't optimize based on subgroup size but
1921        * that's a risk the client took when it asked for a varying subgroup
1922        * size.
1923        */
1924       return info->stage == MESA_SHADER_FRAGMENT ? 0 : max_subgroup_size;
1925 
1926    case SUBGROUP_SIZE_REQUIRE_4:
1927       unreachable("Unsupported subgroup size type");
1928 
1929    case SUBGROUP_SIZE_REQUIRE_8:
1930    case SUBGROUP_SIZE_REQUIRE_16:
1931    case SUBGROUP_SIZE_REQUIRE_32:
1932       assert(gl_shader_stage_uses_workgroup(info->stage) ||
1933              (info->stage >= MESA_SHADER_RAYGEN && info->stage <= MESA_SHADER_CALLABLE));
1934       /* These enum values are expressly chosen to be equal to the subgroup
1935        * size that they require.
1936        */
1937       return info->subgroup_size;
1938 
1939    case SUBGROUP_SIZE_FULL_SUBGROUPS:
1940    case SUBGROUP_SIZE_REQUIRE_64:
1941    case SUBGROUP_SIZE_REQUIRE_128:
1942       break;
1943    }
1944 
1945    unreachable("Invalid subgroup size type");
1946 }
1947 
1948 unsigned
1949 brw_nir_api_subgroup_size(const nir_shader *nir,
1950                           unsigned hw_subgroup_size)
1951 {
1952    return get_subgroup_size(&nir->info, hw_subgroup_size);
1953 }
1954 
1955 void
1956 brw_nir_apply_key(nir_shader *nir,
1957                   const struct brw_compiler *compiler,
1958                   const struct brw_base_prog_key *key,
1959                   unsigned max_subgroup_size)
1960 {
1961    bool progress = false;
1962 
1963    nir_lower_tex_options nir_tex_opts = {
1964       .lower_txd_clamp_bindless_sampler = true,
1965       .lower_txd_clamp_if_sampler_index_not_lt_16 = true,
1966       .lower_invalid_implicit_lod = true,
1967       .lower_index_to_offset = true,
1968    };
1969    OPT(nir_lower_tex, &nir_tex_opts);
1970 
1971    const struct intel_nir_lower_texture_opts tex_opts = {
1972       .combined_lod_and_array_index = compiler->devinfo->ver >= 20,
1973    };
1974    OPT(intel_nir_lower_texture, &tex_opts);
1975 
1976    const nir_lower_subgroups_options subgroups_options = {
1977       .subgroup_size = get_subgroup_size(&nir->info, max_subgroup_size),
1978       .ballot_bit_size = 32,
1979       .ballot_components = 1,
1980       .lower_subgroup_masks = true,
1981    };
1982    OPT(nir_lower_subgroups, &subgroups_options);
1983 
1984    if (key->limit_trig_input_range)
1985       OPT(brw_nir_limit_trig_input_range_workaround);
1986 
1987    if (progress) {
1988       brw_nir_optimize(nir, compiler->devinfo);
1989    }
1990 }
1991 
1992 enum brw_conditional_mod
1993 brw_cmod_for_nir_comparison(nir_op op)
1994 {
1995    switch (op) {
1996    case nir_op_flt:
1997    case nir_op_flt32:
1998    case nir_op_ilt:
1999    case nir_op_ilt32:
2000    case nir_op_ult:
2001    case nir_op_ult32:
2002       return BRW_CONDITIONAL_L;
2003 
2004    case nir_op_fge:
2005    case nir_op_fge32:
2006    case nir_op_ige:
2007    case nir_op_ige32:
2008    case nir_op_uge:
2009    case nir_op_uge32:
2010       return BRW_CONDITIONAL_GE;
2011 
2012    case nir_op_feq:
2013    case nir_op_feq32:
2014    case nir_op_ieq:
2015    case nir_op_ieq32:
2016    case nir_op_b32all_fequal2:
2017    case nir_op_b32all_iequal2:
2018    case nir_op_b32all_fequal3:
2019    case nir_op_b32all_iequal3:
2020    case nir_op_b32all_fequal4:
2021    case nir_op_b32all_iequal4:
2022       return BRW_CONDITIONAL_Z;
2023 
2024    case nir_op_fneu:
2025    case nir_op_fneu32:
2026    case nir_op_ine:
2027    case nir_op_ine32:
2028    case nir_op_b32any_fnequal2:
2029    case nir_op_b32any_inequal2:
2030    case nir_op_b32any_fnequal3:
2031    case nir_op_b32any_inequal3:
2032    case nir_op_b32any_fnequal4:
2033    case nir_op_b32any_inequal4:
2034       return BRW_CONDITIONAL_NZ;
2035 
2036    default:
2037       unreachable("Unsupported NIR comparison op");
2038    }
2039 }
2040 
2041 enum lsc_opcode
2042 lsc_op_for_nir_intrinsic(const nir_intrinsic_instr *intrin)
2043 {
2044    switch (intrin->intrinsic) {
2045    case nir_intrinsic_load_ssbo:
2046    case nir_intrinsic_load_shared:
2047    case nir_intrinsic_load_global:
2048    case nir_intrinsic_load_global_block_intel:
2049    case nir_intrinsic_load_global_constant:
2050    case nir_intrinsic_load_global_constant_uniform_block_intel:
2051    case nir_intrinsic_load_shared_block_intel:
2052    case nir_intrinsic_load_shared_uniform_block_intel:
2053    case nir_intrinsic_load_ssbo_block_intel:
2054    case nir_intrinsic_load_ssbo_uniform_block_intel:
2055    case nir_intrinsic_load_ubo_uniform_block_intel:
2056    case nir_intrinsic_load_scratch:
2057       return LSC_OP_LOAD;
2058 
2059    case nir_intrinsic_store_ssbo:
2060    case nir_intrinsic_store_shared:
2061    case nir_intrinsic_store_global:
2062    case nir_intrinsic_store_global_block_intel:
2063    case nir_intrinsic_store_shared_block_intel:
2064    case nir_intrinsic_store_ssbo_block_intel:
2065    case nir_intrinsic_store_scratch:
2066       return LSC_OP_STORE;
2067 
2068    case nir_intrinsic_image_load:
2069    case nir_intrinsic_bindless_image_load:
2070       return LSC_OP_LOAD_CMASK;
2071 
2072    case nir_intrinsic_image_store:
2073    case nir_intrinsic_bindless_image_store:
2074       return LSC_OP_STORE_CMASK;
2075 
2076    default:
2077       assert(nir_intrinsic_has_atomic_op(intrin));
2078       break;
2079    }
2080 
2081    switch (nir_intrinsic_atomic_op(intrin)) {
2082    case nir_atomic_op_iadd: {
2083       unsigned src_idx;
2084       switch (intrin->intrinsic) {
2085       case nir_intrinsic_image_atomic:
2086       case nir_intrinsic_bindless_image_atomic:
2087          src_idx = 3;
2088          break;
2089       case nir_intrinsic_ssbo_atomic:
2090          src_idx = 2;
2091          break;
2092       case nir_intrinsic_shared_atomic:
2093       case nir_intrinsic_global_atomic:
2094          src_idx = 1;
2095          break;
2096       default:
2097          unreachable("Invalid add atomic opcode");
2098       }
2099 
2100       if (nir_src_is_const(intrin->src[src_idx])) {
2101          int64_t add_val = nir_src_as_int(intrin->src[src_idx]);
2102          if (add_val == 1)
2103             return LSC_OP_ATOMIC_INC;
2104          else if (add_val == -1)
2105             return LSC_OP_ATOMIC_DEC;
2106       }
2107       return LSC_OP_ATOMIC_ADD;
2108    }
2109 
2110    case nir_atomic_op_imin: return LSC_OP_ATOMIC_MIN;
2111    case nir_atomic_op_umin: return LSC_OP_ATOMIC_UMIN;
2112    case nir_atomic_op_imax: return LSC_OP_ATOMIC_MAX;
2113    case nir_atomic_op_umax: return LSC_OP_ATOMIC_UMAX;
2114    case nir_atomic_op_iand: return LSC_OP_ATOMIC_AND;
2115    case nir_atomic_op_ior:  return LSC_OP_ATOMIC_OR;
2116    case nir_atomic_op_ixor: return LSC_OP_ATOMIC_XOR;
2117    case nir_atomic_op_xchg: return LSC_OP_ATOMIC_STORE;
2118    case nir_atomic_op_cmpxchg: return LSC_OP_ATOMIC_CMPXCHG;
2119 
2120    case nir_atomic_op_fmin: return LSC_OP_ATOMIC_FMIN;
2121    case nir_atomic_op_fmax: return LSC_OP_ATOMIC_FMAX;
2122    case nir_atomic_op_fcmpxchg: return LSC_OP_ATOMIC_FCMPXCHG;
2123    case nir_atomic_op_fadd: return LSC_OP_ATOMIC_FADD;
2124 
2125    default:
2126       unreachable("Unsupported NIR atomic intrinsic");
2127    }
2128 }
2129 
2130 enum brw_reg_type
2131 brw_type_for_nir_type(const struct intel_device_info *devinfo,
2132                       nir_alu_type type)
2133 {
2134    switch (type) {
2135    case nir_type_uint:
2136    case nir_type_uint32:
2137       return BRW_TYPE_UD;
2138    case nir_type_bool:
2139    case nir_type_int:
2140    case nir_type_bool32:
2141    case nir_type_int32:
2142       return BRW_TYPE_D;
2143    case nir_type_float:
2144    case nir_type_float32:
2145       return BRW_TYPE_F;
2146    case nir_type_float16:
2147       return BRW_TYPE_HF;
2148    case nir_type_float64:
2149       return BRW_TYPE_DF;
2150    case nir_type_int64:
2151       return BRW_TYPE_Q;
2152    case nir_type_uint64:
2153       return BRW_TYPE_UQ;
2154    case nir_type_int16:
2155       return BRW_TYPE_W;
2156    case nir_type_uint16:
2157       return BRW_TYPE_UW;
2158    case nir_type_int8:
2159       return BRW_TYPE_B;
2160    case nir_type_uint8:
2161       return BRW_TYPE_UB;
2162    default:
2163       unreachable("unknown type");
2164    }
2165 
2166    return BRW_TYPE_F;
2167 }
2168 
2169 nir_shader *
2170 brw_nir_create_passthrough_tcs(void *mem_ctx, const struct brw_compiler *compiler,
2171                                const struct brw_tcs_prog_key *key)
2172 {
2173    assert(key->input_vertices > 0);
2174 
2175    const nir_shader_compiler_options *options =
2176       compiler->nir_options[MESA_SHADER_TESS_CTRL];
2177 
2178    uint64_t inputs_read = key->outputs_written &
2179       ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
2180 
2181    unsigned locations[64];
2182    unsigned num_locations = 0;
2183 
2184    u_foreach_bit64(varying, inputs_read)
2185       locations[num_locations++] = varying;
2186 
2187    nir_shader *nir =
2188       nir_create_passthrough_tcs_impl(options, locations, num_locations,
2189                                       key->input_vertices);
2190 
2191    ralloc_steal(mem_ctx, nir);
2192 
2193    nir->info.inputs_read = inputs_read;
2194    nir->info.tess._primitive_mode = key->_tes_primitive_mode;
2195    nir_validate_shader(nir, "in brw_nir_create_passthrough_tcs");
2196 
2197    struct brw_nir_compiler_opts opts = {};
2198    brw_preprocess_nir(compiler, nir, &opts);
2199 
2200    return nir;
2201 }
2202 
2203 nir_def *
2204 brw_nir_load_global_const(nir_builder *b, nir_intrinsic_instr *load_uniform,
2205       nir_def *base_addr, unsigned off)
2206 {
2207    assert(load_uniform->intrinsic == nir_intrinsic_load_uniform);
2208 
2209    unsigned bit_size = load_uniform->def.bit_size;
2210    assert(bit_size >= 8 && bit_size % 8 == 0);
2211    unsigned byte_size = bit_size / 8;
2212    nir_def *sysval;
2213 
2214    if (nir_src_is_const(load_uniform->src[0])) {
2215       uint64_t offset = off +
2216                         nir_intrinsic_base(load_uniform) +
2217                         nir_src_as_uint(load_uniform->src[0]);
2218 
2219       /* Things should be component-aligned. */
2220       assert(offset % byte_size == 0);
2221 
2222       unsigned suboffset = offset % 64;
2223       uint64_t aligned_offset = offset - suboffset;
2224 
2225       /* Load two just in case we go over a 64B boundary */
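      /* e.g. a constant offset of 100 gives suboffset == 36 and
       * aligned_offset == 64, so we load the 64B blocks at +64 and +128 and
       * nir_extract_bits() pulls the value out starting at bit 36 * 8.
       */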
2226       nir_def *data[2];
2227       for (unsigned i = 0; i < 2; i++) {
2228          nir_def *addr = nir_iadd_imm(b, base_addr, aligned_offset + i * 64);
2229 
2230          data[i] = nir_load_global_constant_uniform_block_intel(
2231             b, 16, 32, addr,
2232             .access = ACCESS_CAN_REORDER | ACCESS_NON_WRITEABLE,
2233             .align_mul = 64);
2234       }
2235 
2236       sysval = nir_extract_bits(b, data, 2, suboffset * 8,
2237                                 load_uniform->num_components, bit_size);
2238    } else {
2239       nir_def *offset32 =
2240          nir_iadd_imm(b, load_uniform->src[0].ssa,
2241                          off + nir_intrinsic_base(load_uniform));
2242       nir_def *addr = nir_iadd(b, base_addr, nir_u2u64(b, offset32));
2243       sysval = nir_load_global_constant(b, addr, byte_size,
2244                                         load_uniform->num_components, bit_size);
2245    }
2246 
2247    return sysval;
2248 }
2249 
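/* A minimal usage sketch (not part of the driver logic above): assuming a
 * nir_builder and a 64-bit base address provided by the caller, a hypothetical
 * lowering callback could replace a load_uniform intrinsic with the value
 * returned by brw_nir_load_global_const() like this.
 */
static inline void
example_lower_load_uniform(nir_builder *b, nir_intrinsic_instr *load_uniform,
                           nir_def *base_addr)
{
   /* Emit the replacement loads right before the intrinsic being lowered. */
   b->cursor = nir_before_instr(&load_uniform->instr);

   nir_def *val = brw_nir_load_global_const(b, load_uniform, base_addr, 0);

   /* Rewrite all uses of the old destination and drop the old intrinsic. */
   nir_def_rewrite_uses(&load_uniform->def, val);
   nir_instr_remove(&load_uniform->instr);
}
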
2250 const struct glsl_type *
2251 brw_nir_get_var_type(const struct nir_shader *nir, nir_variable *var)
2252 {
2253    const struct glsl_type *type = var->interface_type;
2254    if (!type) {
2255       type = var->type;
2256       if (nir_is_arrayed_io(var, nir->info.stage) || var->data.per_view) {
2257          assert(glsl_type_is_array(type));
2258          type = glsl_get_array_element(type);
2259       }
2260    }
2261 
2262    return type;
2263 }
2264