xref: /aosp_15_r20/external/mesa3d/src/intel/compiler/elk/elk_nir.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include "../intel_nir.h"
25 #include "elk_nir.h"
26 #include "elk_nir_private.h"
27 #include "elk_shader.h"
28 #include "dev/intel_debug.h"
29 #include "compiler/glsl_types.h"
30 #include "compiler/nir/nir_builder.h"
31 #include "util/u_math.h"
32 
33 static bool
remap_tess_levels(nir_builder * b,nir_intrinsic_instr * intr,enum tess_primitive_mode _primitive_mode)34 remap_tess_levels(nir_builder *b, nir_intrinsic_instr *intr,
35                   enum tess_primitive_mode _primitive_mode)
36 {
37    const int location = nir_intrinsic_base(intr);
38    const unsigned component = nir_intrinsic_component(intr);
39    bool out_of_bounds = false;
40    bool write = !nir_intrinsic_infos[intr->intrinsic].has_dest;
41    unsigned mask = write ? nir_intrinsic_write_mask(intr) : 0;
42    nir_def *src = NULL, *dest = NULL;
43 
44    if (write) {
45       assert(intr->num_components == intr->src[0].ssa->num_components);
46    } else {
47       assert(intr->num_components == intr->def.num_components);
48    }
49 
50    if (location == VARYING_SLOT_TESS_LEVEL_INNER) {
51       b->cursor = write ? nir_before_instr(&intr->instr)
52                         : nir_after_instr(&intr->instr);
53 
54       switch (_primitive_mode) {
55       case TESS_PRIMITIVE_QUADS:
56          /* gl_TessLevelInner[0..1] lives at DWords 3-2 (reversed). */
57          nir_intrinsic_set_base(intr, 0);
58 
59          if (write) {
60             assert(intr->src[0].ssa->num_components == 2);
61 
62             intr->num_components = 4;
63 
64             nir_def *undef = nir_undef(b, 1, 32);
65             nir_def *x = nir_channel(b, intr->src[0].ssa, 0);
66             nir_def *y = nir_channel(b, intr->src[0].ssa, 1);
67             src = nir_vec4(b, undef, undef, y, x);
68             mask = !!(mask & WRITEMASK_X) << 3 | !!(mask & WRITEMASK_Y) << 2;
69          } else if (intr->def.num_components > 1) {
70             assert(intr->def.num_components == 2);
71 
72             intr->num_components = 4;
73             intr->def.num_components = 4;
74 
75             unsigned wz[2] = { 3, 2 };
76             dest = nir_swizzle(b, &intr->def, wz, 2);
77          } else {
78             nir_intrinsic_set_component(intr, 3 - component);
79          }
80          break;
81       case TESS_PRIMITIVE_TRIANGLES:
82          /* gl_TessLevelInner[0] lives at DWord 4. */
83          nir_intrinsic_set_base(intr, 1);
84          mask &= WRITEMASK_X;
85          out_of_bounds = component > 0;
86          break;
87       case TESS_PRIMITIVE_ISOLINES:
88          out_of_bounds = true;
89          break;
90       default:
91          unreachable("Bogus tessellation domain");
92       }
93    } else if (location == VARYING_SLOT_TESS_LEVEL_OUTER) {
94       b->cursor = write ? nir_before_instr(&intr->instr)
95                         : nir_after_instr(&intr->instr);
96 
97       nir_intrinsic_set_base(intr, 1);
98 
99       switch (_primitive_mode) {
100       case TESS_PRIMITIVE_QUADS:
101       case TESS_PRIMITIVE_TRIANGLES:
102          /* Quads:     gl_TessLevelOuter[0..3] lives at DWords 7-4 (reversed).
103           * Triangles: gl_TessLevelOuter[0..2] lives at DWords 7-5 (reversed).
104           */
105          if (write) {
106             assert(intr->src[0].ssa->num_components == 4);
107 
108             unsigned wzyx[4] = { 3, 2, 1, 0 };
109             src = nir_swizzle(b, intr->src[0].ssa, wzyx, 4);
110             mask = !!(mask & WRITEMASK_X) << 3 | !!(mask & WRITEMASK_Y) << 2 |
111                    !!(mask & WRITEMASK_Z) << 1 | !!(mask & WRITEMASK_W) << 0;
112 
113             /* Don't overwrite the inner factor at DWord 4 for triangles */
114             if (_primitive_mode == TESS_PRIMITIVE_TRIANGLES)
115                mask &= ~WRITEMASK_X;
116          } else if (intr->def.num_components > 1) {
117             assert(intr->def.num_components == 4);
118 
119             unsigned wzyx[4] = { 3, 2, 1, 0 };
120             dest = nir_swizzle(b, &intr->def, wzyx, 4);
121          } else {
122             nir_intrinsic_set_component(intr, 3 - component);
123             out_of_bounds = component == 3 &&
124                             _primitive_mode == TESS_PRIMITIVE_TRIANGLES;
125          }
126          break;
127       case TESS_PRIMITIVE_ISOLINES:
128          /* gl_TessLevelOuter[0..1] lives at DWords 6-7 (in order). */
129          if (write) {
130             assert(intr->src[0].ssa->num_components == 4);
131 
132             nir_def *undef = nir_undef(b, 1, 32);
133             nir_def *x = nir_channel(b, intr->src[0].ssa, 0);
134             nir_def *y = nir_channel(b, intr->src[0].ssa, 1);
135             src = nir_vec4(b, undef, undef, x, y);
136             mask = !!(mask & WRITEMASK_X) << 2 | !!(mask & WRITEMASK_Y) << 3;
137          } else {
138             nir_intrinsic_set_component(intr, 2 + component);
139             out_of_bounds = component > 1;
140          }
141          break;
142       default:
143          unreachable("Bogus tessellation domain");
144       }
145    } else {
146       return false;
147    }
148 
149    if (out_of_bounds) {
150       if (!write)
151          nir_def_rewrite_uses(&intr->def, nir_undef(b, 1, 32));
152       nir_instr_remove(&intr->instr);
153    } else if (write) {
154       nir_intrinsic_set_write_mask(intr, mask);
155 
156       if (src) {
157          nir_src_rewrite(&intr->src[0], src);
158       }
159    } else if (dest) {
160       nir_def_rewrite_uses_after(&intr->def, dest,
161                                      dest->parent_instr);
162    }
163 
164    return true;
165 }
166 
167 static bool
is_input(nir_intrinsic_instr * intrin)168 is_input(nir_intrinsic_instr *intrin)
169 {
170    return intrin->intrinsic == nir_intrinsic_load_input ||
171           intrin->intrinsic == nir_intrinsic_load_per_primitive_input ||
172           intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
173           intrin->intrinsic == nir_intrinsic_load_interpolated_input;
174 }
175 
176 static bool
is_output(nir_intrinsic_instr * intrin)177 is_output(nir_intrinsic_instr *intrin)
178 {
179    return intrin->intrinsic == nir_intrinsic_load_output ||
180           intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
181           intrin->intrinsic == nir_intrinsic_store_output ||
182           intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
183 }
184 
185 
186 static bool
remap_patch_urb_offsets(nir_block * block,nir_builder * b,const struct intel_vue_map * vue_map,enum tess_primitive_mode tes_primitive_mode)187 remap_patch_urb_offsets(nir_block *block, nir_builder *b,
188                         const struct intel_vue_map *vue_map,
189                         enum tess_primitive_mode tes_primitive_mode)
190 {
191    nir_foreach_instr_safe(instr, block) {
192       if (instr->type != nir_instr_type_intrinsic)
193          continue;
194 
195       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
196 
197       gl_shader_stage stage = b->shader->info.stage;
198 
199       if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
200           (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
201 
202          if (remap_tess_levels(b, intrin, tes_primitive_mode))
203             continue;
204 
205          int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
206          assert(vue_slot != -1);
207          intrin->const_index[0] = vue_slot;
208 
209          nir_src *vertex = nir_get_io_arrayed_index_src(intrin);
210          if (vertex) {
211             if (nir_src_is_const(*vertex)) {
212                intrin->const_index[0] += nir_src_as_uint(*vertex) *
213                                          vue_map->num_per_vertex_slots;
214             } else {
215                b->cursor = nir_before_instr(&intrin->instr);
216 
217                /* Multiply by the number of per-vertex slots. */
218                nir_def *vertex_offset =
219                   nir_imul(b,
220                            vertex->ssa,
221                            nir_imm_int(b,
222                                        vue_map->num_per_vertex_slots));
223 
224                /* Add it to the existing offset */
225                nir_src *offset = nir_get_io_offset_src(intrin);
226                nir_def *total_offset =
227                   nir_iadd(b, vertex_offset,
228                            offset->ssa);
229 
230                nir_src_rewrite(offset, total_offset);
231             }
232          }
233       }
234    }
235    return true;
236 }
237 
238 void
elk_nir_lower_vs_inputs(nir_shader * nir,bool edgeflag_is_last,const uint8_t * vs_attrib_wa_flags)239 elk_nir_lower_vs_inputs(nir_shader *nir,
240                         bool edgeflag_is_last,
241                         const uint8_t *vs_attrib_wa_flags)
242 {
243    /* Start with the location of the variable's base. */
244    nir_foreach_shader_in_variable(var, nir)
245       var->data.driver_location = var->data.location;
246 
247    /* Now use nir_lower_io to walk dereference chains.  Attribute arrays are
248     * loaded as one vec4 or dvec4 per element (or matrix column), depending on
249     * whether it is a double-precision type or not.
250     */
251    nir_lower_io(nir, nir_var_shader_in, elk_type_size_vec4,
252                 nir_lower_io_lower_64bit_to_32);
253 
254    /* This pass needs actual constants */
255    nir_opt_constant_folding(nir);
256 
257    nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
258 
259    elk_nir_apply_attribute_workarounds(nir, vs_attrib_wa_flags);
260 
261    /* The last step is to remap VERT_ATTRIB_* to actual registers */
262 
263    /* Whether or not we have any system generated values.  gl_DrawID is not
264     * included here as it lives in its own vec4.
265     */
266    const bool has_sgvs =
267       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_FIRST_VERTEX) ||
268       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_BASE_INSTANCE) ||
269       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) ||
270       BITSET_TEST(nir->info.system_values_read, SYSTEM_VALUE_INSTANCE_ID);
271 
272    const unsigned num_inputs = util_bitcount64(nir->info.inputs_read);
273 
274    nir_foreach_function_impl(impl, nir) {
275       nir_builder b = nir_builder_create(impl);
276 
277       nir_foreach_block(block, impl) {
278          nir_foreach_instr_safe(instr, block) {
279             if (instr->type != nir_instr_type_intrinsic)
280                continue;
281 
282             nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
283 
284             switch (intrin->intrinsic) {
285             case nir_intrinsic_load_first_vertex:
286             case nir_intrinsic_load_base_instance:
287             case nir_intrinsic_load_vertex_id_zero_base:
288             case nir_intrinsic_load_instance_id:
289             case nir_intrinsic_load_is_indexed_draw:
290             case nir_intrinsic_load_draw_id: {
291                b.cursor = nir_after_instr(&intrin->instr);
292 
293                /* gl_VertexID and friends are stored by the VF as the last
294                 * vertex element.  We convert them to load_input intrinsics at
295                 * the right location.
296                 */
297                nir_intrinsic_instr *load =
298                   nir_intrinsic_instr_create(nir, nir_intrinsic_load_input);
299                load->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
300 
301                nir_intrinsic_set_base(load, num_inputs);
302                switch (intrin->intrinsic) {
303                case nir_intrinsic_load_first_vertex:
304                   nir_intrinsic_set_component(load, 0);
305                   break;
306                case nir_intrinsic_load_base_instance:
307                   nir_intrinsic_set_component(load, 1);
308                   break;
309                case nir_intrinsic_load_vertex_id_zero_base:
310                   nir_intrinsic_set_component(load, 2);
311                   break;
312                case nir_intrinsic_load_instance_id:
313                   nir_intrinsic_set_component(load, 3);
314                   break;
315                case nir_intrinsic_load_draw_id:
316                case nir_intrinsic_load_is_indexed_draw:
317                   /* gl_DrawID and IsIndexedDraw are stored right after
318                    * gl_VertexID and friends if any of them exist.
319                    */
320                   nir_intrinsic_set_base(load, num_inputs + has_sgvs);
321                   if (intrin->intrinsic == nir_intrinsic_load_draw_id)
322                      nir_intrinsic_set_component(load, 0);
323                   else
324                      nir_intrinsic_set_component(load, 1);
325                   break;
326                default:
327                   unreachable("Invalid system value intrinsic");
328                }
329 
330                load->num_components = 1;
331                nir_def_init(&load->instr, &load->def, 1, 32);
332                nir_builder_instr_insert(&b, &load->instr);
333 
334                nir_def_replace(&intrin->def, &load->def);
335                break;
336             }
337 
338             case nir_intrinsic_load_input: {
339                /* Attributes come in a contiguous block, ordered by their
340                 * gl_vert_attrib value.  That means we can compute the slot
341                 * number for an attribute by masking out the enabled attributes
342                 * before it and counting the bits.
343                 */
344                int attr = nir_intrinsic_base(intrin);
345                uint64_t inputs_read = nir->info.inputs_read;
346                int slot = -1;
347                if (edgeflag_is_last) {
348                   inputs_read &= ~BITFIELD64_BIT(VERT_ATTRIB_EDGEFLAG);
349                   if (attr == VERT_ATTRIB_EDGEFLAG)
350                      slot = num_inputs - 1;
351                }
352                if (slot == -1)
353                   slot = util_bitcount64(inputs_read &
354                                          BITFIELD64_MASK(attr));
355                nir_intrinsic_set_base(intrin, slot);
356                break;
357             }
358 
359             default:
360                break; /* Nothing to do */
361             }
362          }
363       }
364    }
365 }
366 
367 void
elk_nir_lower_vue_inputs(nir_shader * nir,const struct intel_vue_map * vue_map)368 elk_nir_lower_vue_inputs(nir_shader *nir,
369                          const struct intel_vue_map *vue_map)
370 {
371    nir_foreach_shader_in_variable(var, nir)
372       var->data.driver_location = var->data.location;
373 
374    /* Inputs are stored in vec4 slots, so use elk_type_size_vec4(). */
375    nir_lower_io(nir, nir_var_shader_in, elk_type_size_vec4,
376                 nir_lower_io_lower_64bit_to_32);
377 
378    /* This pass needs actual constants */
379    nir_opt_constant_folding(nir);
380 
381    nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
382 
383    nir_foreach_function_impl(impl, nir) {
384       nir_foreach_block(block, impl) {
385          nir_foreach_instr(instr, block) {
386             if (instr->type != nir_instr_type_intrinsic)
387                continue;
388 
389             nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
390 
391             if (intrin->intrinsic == nir_intrinsic_load_input ||
392                 intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
393                /* Offset 0 is the VUE header, which contains
394                 * VARYING_SLOT_LAYER [.y], VARYING_SLOT_VIEWPORT [.z], and
395                 * VARYING_SLOT_PSIZ [.w].
396                 */
397                int varying = nir_intrinsic_base(intrin);
398                int vue_slot;
399                switch (varying) {
400                case VARYING_SLOT_PSIZ:
401                   nir_intrinsic_set_base(intrin, 0);
402                   nir_intrinsic_set_component(intrin, 3);
403                   break;
404 
405                default:
406                   vue_slot = vue_map->varying_to_slot[varying];
407                   assert(vue_slot != -1);
408                   nir_intrinsic_set_base(intrin, vue_slot);
409                   break;
410                }
411             }
412          }
413       }
414    }
415 }
416 
417 void
elk_nir_lower_tes_inputs(nir_shader * nir,const struct intel_vue_map * vue_map)418 elk_nir_lower_tes_inputs(nir_shader *nir, const struct intel_vue_map *vue_map)
419 {
420    nir_foreach_shader_in_variable(var, nir)
421       var->data.driver_location = var->data.location;
422 
423    nir_lower_io(nir, nir_var_shader_in, elk_type_size_vec4,
424                 nir_lower_io_lower_64bit_to_32);
425 
426    /* This pass needs actual constants */
427    nir_opt_constant_folding(nir);
428 
429    nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
430 
431    nir_foreach_function_impl(impl, nir) {
432       nir_builder b = nir_builder_create(impl);
433       nir_foreach_block(block, impl) {
434          remap_patch_urb_offsets(block, &b, vue_map,
435                                  nir->info.tess._primitive_mode);
436       }
437    }
438 }
439 
440 static bool
lower_barycentric_per_sample(nir_builder * b,nir_intrinsic_instr * intrin,UNUSED void * cb_data)441 lower_barycentric_per_sample(nir_builder *b,
442                              nir_intrinsic_instr *intrin,
443                              UNUSED void *cb_data)
444 {
445    if (intrin->intrinsic != nir_intrinsic_load_barycentric_pixel &&
446        intrin->intrinsic != nir_intrinsic_load_barycentric_centroid)
447       return false;
448 
449    b->cursor = nir_before_instr(&intrin->instr);
450    nir_def *centroid =
451       nir_load_barycentric(b, nir_intrinsic_load_barycentric_sample,
452                            nir_intrinsic_interp_mode(intrin));
453    nir_def_replace(&intrin->def, centroid);
454    return true;
455 }
456 
457 /**
458  * Convert interpolateAtOffset() offsets from [-0.5, +0.5] floating point
459  * offsets to integer [-8, +7] offsets (in units of 1/16th of a pixel).
460  *
461  * We clamp to +7/16 on the upper end of the range, since +0.5 isn't
462  * representable in a S0.4 value; a naive conversion would give us -8/16,
463  * which is the opposite of what was intended.
464  *
465  * This is allowed by GL_ARB_gpu_shader5's quantization rules:
466  *
467  *    "Not all values of <offset> may be supported; x and y offsets may
468  *     be rounded to fixed-point values with the number of fraction bits
469  *     given by the implementation-dependent constant
470  *     FRAGMENT_INTERPOLATION_OFFSET_BITS."
471  */
472 static bool
lower_barycentric_at_offset(nir_builder * b,nir_intrinsic_instr * intrin,void * data)473 lower_barycentric_at_offset(nir_builder *b, nir_intrinsic_instr *intrin,
474                             void *data)
475 {
476    if (intrin->intrinsic != nir_intrinsic_load_barycentric_at_offset)
477       return false;
478 
479    b->cursor = nir_before_instr(&intrin->instr);
480 
481    assert(intrin->src[0].ssa);
482    nir_def *offset =
483       nir_imin(b, nir_imm_int(b, 7),
484                nir_f2i32(b, nir_fmul_imm(b, intrin->src[0].ssa, 16)));
485 
486    nir_src_rewrite(&intrin->src[0], offset);
487 
488    return true;
489 }
490 
491 void
elk_nir_lower_fs_inputs(nir_shader * nir,const struct intel_device_info * devinfo,const struct elk_wm_prog_key * key)492 elk_nir_lower_fs_inputs(nir_shader *nir,
493                         const struct intel_device_info *devinfo,
494                         const struct elk_wm_prog_key *key)
495 {
496    nir_foreach_shader_in_variable(var, nir) {
497       var->data.driver_location = var->data.location;
498 
499       /* Apply default interpolation mode.
500        *
501        * Everything defaults to smooth except for the legacy GL color
502        * built-in variables, which might be flat depending on API state.
503        */
504       if (var->data.interpolation == INTERP_MODE_NONE) {
505          const bool flat = key->flat_shade &&
506             (var->data.location == VARYING_SLOT_COL0 ||
507              var->data.location == VARYING_SLOT_COL1);
508 
509          var->data.interpolation = flat ? INTERP_MODE_FLAT
510                                         : INTERP_MODE_SMOOTH;
511       }
512 
513       /* On Ironlake and below, there is only one interpolation mode.
514        * Centroid interpolation doesn't mean anything on this hardware --
515        * there is no multisampling.
516        */
517       if (devinfo->ver < 6) {
518          var->data.centroid = false;
519          var->data.sample = false;
520       }
521    }
522 
523    nir_lower_io(nir, nir_var_shader_in, elk_type_size_vec4,
524                 nir_lower_io_lower_64bit_to_32);
525 
526    if (key->multisample_fbo == ELK_NEVER) {
527       nir_lower_single_sampled(nir);
528    } else if (key->persample_interp == ELK_ALWAYS) {
529       nir_shader_intrinsics_pass(nir, lower_barycentric_per_sample,
530                                    nir_metadata_control_flow,
531                                    NULL);
532    }
533 
534    nir_shader_intrinsics_pass(nir, lower_barycentric_at_offset,
535                                 nir_metadata_control_flow,
536                                 NULL);
537 
538    /* This pass needs actual constants */
539    nir_opt_constant_folding(nir);
540 
541    nir_io_add_const_offset_to_base(nir, nir_var_shader_in);
542 }
543 
544 void
elk_nir_lower_vue_outputs(nir_shader * nir)545 elk_nir_lower_vue_outputs(nir_shader *nir)
546 {
547    nir_foreach_shader_out_variable(var, nir) {
548       var->data.driver_location = var->data.location;
549    }
550 
551    nir_lower_io(nir, nir_var_shader_out, elk_type_size_vec4,
552                 nir_lower_io_lower_64bit_to_32);
553 }
554 
555 void
elk_nir_lower_tcs_outputs(nir_shader * nir,const struct intel_vue_map * vue_map,enum tess_primitive_mode tes_primitive_mode)556 elk_nir_lower_tcs_outputs(nir_shader *nir, const struct intel_vue_map *vue_map,
557                           enum tess_primitive_mode tes_primitive_mode)
558 {
559    nir_foreach_shader_out_variable(var, nir) {
560       var->data.driver_location = var->data.location;
561    }
562 
563    nir_lower_io(nir, nir_var_shader_out, elk_type_size_vec4,
564                 nir_lower_io_lower_64bit_to_32);
565 
566    /* This pass needs actual constants */
567    nir_opt_constant_folding(nir);
568 
569    nir_io_add_const_offset_to_base(nir, nir_var_shader_out);
570 
571    nir_foreach_function_impl(impl, nir) {
572       nir_builder b = nir_builder_create(impl);
573       nir_foreach_block(block, impl) {
574          remap_patch_urb_offsets(block, &b, vue_map, tes_primitive_mode);
575       }
576    }
577 }
578 
579 void
elk_nir_lower_fs_outputs(nir_shader * nir)580 elk_nir_lower_fs_outputs(nir_shader *nir)
581 {
582    nir_foreach_shader_out_variable(var, nir) {
583       var->data.driver_location =
584          SET_FIELD(var->data.index, ELK_NIR_FRAG_OUTPUT_INDEX) |
585          SET_FIELD(var->data.location, ELK_NIR_FRAG_OUTPUT_LOCATION);
586    }
587 
588    nir_lower_io(nir, nir_var_shader_out, elk_type_size_dvec4, 0);
589 }
590 
591 #define OPT(pass, ...) ({                                  \
592    bool this_progress = false;                             \
593    NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
594    if (this_progress)                                      \
595       progress = true;                                     \
596    this_progress;                                          \
597 })
598 
599 void
elk_nir_optimize(nir_shader * nir,bool is_scalar,const struct intel_device_info * devinfo)600 elk_nir_optimize(nir_shader *nir, bool is_scalar,
601                  const struct intel_device_info *devinfo)
602 {
603    bool progress;
604    unsigned lower_flrp =
605       (nir->options->lower_flrp16 ? 16 : 0) |
606       (nir->options->lower_flrp32 ? 32 : 0) |
607       (nir->options->lower_flrp64 ? 64 : 0);
608 
609    do {
610       progress = false;
611       OPT(nir_shrink_vec_array_vars, nir_var_function_temp);
612       OPT(nir_opt_deref);
613       if (OPT(nir_opt_memcpy))
614          OPT(nir_split_var_copies);
615       OPT(nir_lower_vars_to_ssa);
616       if (!nir->info.var_copies_lowered) {
617          /* Only run this pass if nir_lower_var_copies was not called
618           * yet. That would lower away any copy_deref instructions and we
619           * don't want to introduce any more.
620           */
621          OPT(nir_opt_find_array_copies);
622       }
623       OPT(nir_opt_copy_prop_vars);
624       OPT(nir_opt_dead_write_vars);
625       OPT(nir_opt_combine_stores, nir_var_all);
626 
627       if (is_scalar) {
628          OPT(nir_lower_alu_to_scalar, NULL, NULL);
629       } else {
630          OPT(nir_opt_shrink_stores, true);
631          OPT(nir_opt_shrink_vectors, false);
632       }
633 
634       OPT(nir_copy_prop);
635 
636       if (is_scalar) {
637          OPT(nir_lower_phis_to_scalar, false);
638       }
639 
640       OPT(nir_copy_prop);
641       OPT(nir_opt_dce);
642       OPT(nir_opt_cse);
643       OPT(nir_opt_combine_stores, nir_var_all);
644 
645       /* Passing 0 to the peephole select pass causes it to convert
646        * if-statements that contain only move instructions in the branches
647        * regardless of the count.
648        *
649        * Passing 1 to the peephole select pass causes it to convert
650        * if-statements that contain at most a single ALU instruction (total)
651        * in both branches.  Before Gfx6, some math instructions were
652        * prohibitively expensive and the results of compare operations need an
653        * extra resolve step.  For these reasons, this pass is more harmful
654        * than good on those platforms.
655        *
656        * For indirect loads of uniforms (push constants), we assume that array
657        * indices will nearly always be in bounds and the cost of the load is
658        * low.  Therefore there shouldn't be a performance benefit to avoid it.
659        * However, in vec4 tessellation shaders, these loads operate by
660        * actually pulling from memory.
661        */
662       const bool is_vec4_tessellation = !is_scalar &&
663          (nir->info.stage == MESA_SHADER_TESS_CTRL ||
664           nir->info.stage == MESA_SHADER_TESS_EVAL);
665       OPT(nir_opt_peephole_select, 0, !is_vec4_tessellation, false);
666       OPT(nir_opt_peephole_select, 8, !is_vec4_tessellation,
667           devinfo->ver >= 6);
668 
669       OPT(nir_opt_intrinsics);
670       OPT(nir_opt_idiv_const, 32);
671       OPT(nir_opt_algebraic);
672 
673       /* BFI2 did not exist until Gfx7, so there's no point in trying to
674        * optimize an instruction that should not get generated.
675        */
676       if (devinfo->ver >= 7)
677          OPT(nir_opt_reassociate_bfi);
678 
679       OPT(nir_lower_constant_convert_alu_types);
680       OPT(nir_opt_constant_folding);
681 
682       if (lower_flrp != 0) {
683          if (OPT(nir_lower_flrp,
684                  lower_flrp,
685                  false /* always_precise */)) {
686             OPT(nir_opt_constant_folding);
687          }
688 
689          /* Nothing should rematerialize any flrps, so we only need to do this
690           * lowering once.
691           */
692          lower_flrp = 0;
693       }
694 
695       OPT(nir_opt_dead_cf);
696       if (OPT(nir_opt_loop)) {
697          /* If nir_opt_loop makes progress, then we need to clean
698           * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
699           * to make progress.
700           */
701          OPT(nir_copy_prop);
702          OPT(nir_opt_dce);
703       }
704       OPT(nir_opt_if, nir_opt_if_optimize_phi_true_false);
705       OPT(nir_opt_conditional_discard);
706       if (nir->options->max_unroll_iterations != 0) {
707          OPT(nir_opt_loop_unroll);
708       }
709       OPT(nir_opt_remove_phis);
710       OPT(nir_opt_gcm, false);
711       OPT(nir_opt_undef);
712       OPT(nir_lower_pack);
713    } while (progress);
714 
715    /* Workaround Gfxbench unused local sampler variable which will trigger an
716     * assert in the opt_large_constants pass.
717     */
718    OPT(nir_remove_dead_variables, nir_var_function_temp, NULL);
719 }
720 
721 static unsigned
lower_bit_size_callback(const nir_instr * instr,UNUSED void * data)722 lower_bit_size_callback(const nir_instr *instr, UNUSED void *data)
723 {
724    switch (instr->type) {
725    case nir_instr_type_alu: {
726       nir_alu_instr *alu = nir_instr_as_alu(instr);
727       switch (alu->op) {
728       case nir_op_bit_count:
729       case nir_op_ufind_msb:
730       case nir_op_ifind_msb:
731       case nir_op_find_lsb:
732          /* These are handled specially because the destination is always
733           * 32-bit and so the bit size of the instruction is given by the
734           * source.
735           */
736          return alu->src[0].src.ssa->bit_size >= 32 ? 0 : 32;
737       default:
738          break;
739       }
740 
741       if (alu->def.bit_size >= 32)
742          return 0;
743 
744       /* Note: nir_op_iabs and nir_op_ineg are not lowered here because the
745        * 8-bit ABS or NEG instruction should eventually get copy propagated
746        * into the MOV that does the type conversion.  This results in far
747        * fewer MOV instructions.
748        */
749       switch (alu->op) {
750       case nir_op_idiv:
751       case nir_op_imod:
752       case nir_op_irem:
753       case nir_op_udiv:
754       case nir_op_umod:
755       case nir_op_fceil:
756       case nir_op_ffloor:
757       case nir_op_ffract:
758       case nir_op_fround_even:
759       case nir_op_ftrunc:
760          return 32;
761       case nir_op_frcp:
762       case nir_op_frsq:
763       case nir_op_fsqrt:
764       case nir_op_fpow:
765       case nir_op_fexp2:
766       case nir_op_flog2:
767       case nir_op_fsin:
768       case nir_op_fcos:
769          return 32;
770       case nir_op_isign:
771          assert(!"Should have been lowered by nir_opt_algebraic.");
772          return 0;
773       default:
774          if (nir_op_infos[alu->op].num_inputs >= 2 &&
775              alu->def.bit_size == 8)
776             return 16;
777 
778          if (nir_alu_instr_is_comparison(alu) &&
779              alu->src[0].src.ssa->bit_size == 8)
780             return 16;
781 
782          return 0;
783       }
784       break;
785    }
786 
787    case nir_instr_type_intrinsic: {
788       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
789       switch (intrin->intrinsic) {
790       case nir_intrinsic_read_invocation:
791       case nir_intrinsic_read_first_invocation:
792       case nir_intrinsic_vote_feq:
793       case nir_intrinsic_vote_ieq:
794       case nir_intrinsic_shuffle:
795       case nir_intrinsic_shuffle_xor:
796       case nir_intrinsic_shuffle_up:
797       case nir_intrinsic_shuffle_down:
798       case nir_intrinsic_quad_broadcast:
799       case nir_intrinsic_quad_swap_horizontal:
800       case nir_intrinsic_quad_swap_vertical:
801       case nir_intrinsic_quad_swap_diagonal:
802          if (intrin->src[0].ssa->bit_size == 8)
803             return 16;
804          return 0;
805 
806       case nir_intrinsic_reduce:
807       case nir_intrinsic_inclusive_scan:
808       case nir_intrinsic_exclusive_scan:
809          /* There are a couple of register region issues that make things
810           * complicated for 8-bit types:
811           *
812           *    1. Only raw moves are allowed to write to a packed 8-bit
813           *       destination.
814           *    2. If we use a strided destination, the efficient way to do
815           *       scan operations ends up using strides that are too big to
816           *       encode in an instruction.
817           *
818           * To get around these issues, we just do all 8-bit scan operations
819           * in 16 bits.  It's actually fewer instructions than what we'd have
820           * to do if we were trying to do it in native 8-bit types and the
821           * results are the same once we truncate to 8 bits at the end.
822           */
823          if (intrin->def.bit_size == 8)
824             return 16;
825          return 0;
826 
827       default:
828          return 0;
829       }
830       break;
831    }
832 
833    case nir_instr_type_phi: {
834       nir_phi_instr *phi = nir_instr_as_phi(instr);
835       if (phi->def.bit_size == 8)
836          return 16;
837       return 0;
838    }
839 
840    default:
841       return 0;
842    }
843 }
844 
845 /* On gfx12.5+, if the offsets are not both constant and in the {-8,7} range,
846  * we will have nir_lower_tex() lower the source offset by returning true from
847  * this filter function.
848  */
849 static bool
lower_xehp_tg4_offset_filter(const nir_instr * instr,UNUSED const void * data)850 lower_xehp_tg4_offset_filter(const nir_instr *instr, UNUSED const void *data)
851 {
852    if (instr->type != nir_instr_type_tex)
853       return false;
854 
855    nir_tex_instr *tex = nir_instr_as_tex(instr);
856 
857    if (tex->op != nir_texop_tg4)
858       return false;
859 
860    int offset_index = nir_tex_instr_src_index(tex, nir_tex_src_offset);
861    if (offset_index < 0)
862       return false;
863 
864    if (!nir_src_is_const(tex->src[offset_index].src))
865       return true;
866 
867    int64_t offset_x = nir_src_comp_as_int(tex->src[offset_index].src, 0);
868    int64_t offset_y = nir_src_comp_as_int(tex->src[offset_index].src, 1);
869 
870    return offset_x < -8 || offset_x > 7 || offset_y < -8 || offset_y > 7;
871 }
872 
873 /* Does some simple lowering and runs the standard suite of optimizations
874  *
875  * This is intended to be called more-or-less directly after you get the
876  * shader out of GLSL or some other source.  While it is geared towards i965,
877  * it is not at all generator-specific.
878  */
879 void
elk_preprocess_nir(const struct elk_compiler * compiler,nir_shader * nir,const struct elk_nir_compiler_opts * opts)880 elk_preprocess_nir(const struct elk_compiler *compiler, nir_shader *nir,
881                    const struct elk_nir_compiler_opts *opts)
882 {
883    const struct intel_device_info *devinfo = compiler->devinfo;
884    UNUSED bool progress; /* Written by OPT */
885 
886    const bool is_scalar = compiler->scalar_stage[nir->info.stage];
887 
888    nir_validate_ssa_dominance(nir, "before elk_preprocess_nir");
889 
890    OPT(nir_lower_frexp);
891 
892    if (is_scalar) {
893       OPT(nir_lower_alu_to_scalar, NULL, NULL);
894    }
895 
896    if (nir->info.stage == MESA_SHADER_GEOMETRY)
897       OPT(nir_lower_gs_intrinsics, 0);
898 
899    /* See also elk_nir_trig_workarounds.py */
900    if (compiler->precise_trig)
901       OPT(elk_nir_apply_trig_workarounds);
902 
903    /* This workaround existing for performance reasons. Since it requires not
904     * setting RENDER_SURFACE_STATE::SurfaceArray when the array length is 1,
905     * we're loosing the HW robustness feature in that case.
906     *
907     * So when robust image access is enabled, just avoid the workaround.
908     */
909    if (intel_needs_workaround(devinfo, 1806565034) && !opts->robust_image_access)
910       OPT(intel_nir_clamp_image_1d_2d_array_sizes);
911 
912    const nir_lower_tex_options tex_options = {
913       .lower_txp = ~0,
914       .lower_txf_offset = true,
915       .lower_rect_offset = true,
916       .lower_txd_cube_map = true,
917       .lower_txb_shadow_clamp = true,
918       .lower_txd_shadow_clamp = true,
919       .lower_txd_offset_clamp = true,
920       .lower_tg4_offsets = true,
921       .lower_txs_lod = true, /* Wa_14012320009 */
922       .lower_invalid_implicit_lod = true,
923    };
924 
925    OPT(nir_lower_tex, &tex_options);
926    OPT(nir_normalize_cubemap_coords);
927 
928    OPT(nir_lower_global_vars_to_local);
929 
930    OPT(nir_split_var_copies);
931    OPT(nir_split_struct_vars, nir_var_function_temp);
932 
933    elk_nir_optimize(nir, is_scalar, devinfo);
934 
935    OPT(nir_lower_doubles, opts->softfp64, nir->options->lower_doubles_options);
936    if (OPT(nir_lower_int64_float_conversions)) {
937       OPT(nir_opt_algebraic);
938       OPT(nir_lower_doubles, opts->softfp64,
939           nir->options->lower_doubles_options);
940    }
941 
942    OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
943 
944    /* Lower a bunch of stuff */
945    OPT(nir_lower_var_copies);
946 
947    /* This needs to be run after the first optimization pass but before we
948     * lower indirect derefs away
949     */
950    if (compiler->supports_shader_constants) {
951       OPT(nir_opt_large_constants, NULL, 32);
952    }
953 
954    if (is_scalar) {
955       OPT(nir_lower_load_const_to_scalar);
956    }
957 
958    OPT(nir_lower_system_values);
959    nir_lower_compute_system_values_options lower_csv_options = {
960       .has_base_workgroup_id = nir->info.stage == MESA_SHADER_COMPUTE,
961    };
962    OPT(nir_lower_compute_system_values, &lower_csv_options);
963 
964    const nir_lower_subgroups_options subgroups_options = {
965       .ballot_bit_size = 32,
966       .ballot_components = 1,
967       .lower_to_scalar = true,
968       .lower_vote_trivial = !is_scalar,
969       .lower_relative_shuffle = true,
970       .lower_quad_broadcast_dynamic = true,
971       .lower_elect = true,
972       .lower_inverse_ballot = true,
973       .lower_rotate_to_shuffle = true,
974    };
975    OPT(nir_lower_subgroups, &subgroups_options);
976 
977    nir_variable_mode indirect_mask =
978       elk_nir_no_indirect_mask(compiler, nir->info.stage);
979    OPT(nir_lower_indirect_derefs, indirect_mask, UINT32_MAX);
980 
981    /* Even in cases where we can handle indirect temporaries via scratch, we
982     * it can still be expensive.  Lower indirects on small arrays to
983     * conditional load/stores.
984     *
985     * The threshold of 16 was chosen semi-arbitrarily.  The idea is that an
986     * indirect on an array of 16 elements is about 30 instructions at which
987     * point, you may be better off doing a send.  With a SIMD8 program, 16
988     * floats is 1/8 of the entire register file.  Any array larger than that
989     * is likely to cause pressure issues.  Also, this value is sufficiently
990     * high that the benchmarks known to suffer from large temporary array
991     * issues are helped but nothing else in shader-db is hurt except for maybe
992     * that one kerbal space program shader.
993     */
994    if (is_scalar && !(indirect_mask & nir_var_function_temp))
995       OPT(nir_lower_indirect_derefs, nir_var_function_temp, 16);
996 
997    /* Lower array derefs of vectors for SSBO and UBO loads.  For both UBOs and
998     * SSBOs, our back-end is capable of loading an entire vec4 at a time and
999     * we would like to take advantage of that whenever possible regardless of
1000     * whether or not the app gives us full loads.  This should allow the
1001     * optimizer to combine UBO and SSBO load operations and save us some send
1002     * messages.
1003     */
1004    OPT(nir_lower_array_deref_of_vec,
1005        nir_var_mem_ubo | nir_var_mem_ssbo, NULL,
1006        nir_lower_direct_array_deref_of_vec_load);
1007 
1008    /* Get rid of split copies */
1009    elk_nir_optimize(nir, is_scalar, devinfo);
1010 }
1011 
1012 static bool
elk_nir_zero_inputs_instr(struct nir_builder * b,nir_intrinsic_instr * intrin,void * data)1013 elk_nir_zero_inputs_instr(struct nir_builder *b, nir_intrinsic_instr *intrin,
1014                           void *data)
1015 {
1016    if (intrin->intrinsic != nir_intrinsic_load_deref)
1017       return false;
1018 
1019    nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
1020    if (!nir_deref_mode_is(deref, nir_var_shader_in))
1021       return false;
1022 
1023    if (deref->deref_type != nir_deref_type_var)
1024       return false;
1025 
1026    nir_variable *var = deref->var;
1027 
1028    uint64_t zero_inputs = *(uint64_t *)data;
1029    if (!(BITFIELD64_BIT(var->data.location) & zero_inputs))
1030       return false;
1031 
1032    b->cursor = nir_before_instr(&intrin->instr);
1033 
1034    nir_def *zero = nir_imm_zero(b, 1, 32);
1035 
1036    nir_def_replace(&intrin->def, zero);
1037 
1038    return true;
1039 }
1040 
1041 static bool
elk_nir_zero_inputs(nir_shader * shader,uint64_t * zero_inputs)1042 elk_nir_zero_inputs(nir_shader *shader, uint64_t *zero_inputs)
1043 {
1044    return nir_shader_intrinsics_pass(shader, elk_nir_zero_inputs_instr,
1045                                      nir_metadata_control_flow,
1046                                      zero_inputs);
1047 }
1048 
1049 void
elk_nir_link_shaders(const struct elk_compiler * compiler,nir_shader * producer,nir_shader * consumer)1050 elk_nir_link_shaders(const struct elk_compiler *compiler,
1051                      nir_shader *producer, nir_shader *consumer)
1052 {
1053    const struct intel_device_info *devinfo = compiler->devinfo;
1054 
1055    nir_lower_io_arrays_to_elements(producer, consumer);
1056    nir_validate_shader(producer, "after nir_lower_io_arrays_to_elements");
1057    nir_validate_shader(consumer, "after nir_lower_io_arrays_to_elements");
1058 
1059    const bool p_is_scalar = compiler->scalar_stage[producer->info.stage];
1060    const bool c_is_scalar = compiler->scalar_stage[consumer->info.stage];
1061 
1062    if (p_is_scalar && c_is_scalar) {
1063       NIR_PASS(_, producer, nir_lower_io_to_scalar_early, nir_var_shader_out);
1064       NIR_PASS(_, consumer, nir_lower_io_to_scalar_early, nir_var_shader_in);
1065       elk_nir_optimize(producer, p_is_scalar, devinfo);
1066       elk_nir_optimize(consumer, c_is_scalar, devinfo);
1067    }
1068 
1069    if (nir_link_opt_varyings(producer, consumer))
1070       elk_nir_optimize(consumer, c_is_scalar, devinfo);
1071 
1072    NIR_PASS(_, producer, nir_remove_dead_variables, nir_var_shader_out, NULL);
1073    NIR_PASS(_, consumer, nir_remove_dead_variables, nir_var_shader_in, NULL);
1074 
1075    if (nir_remove_unused_varyings(producer, consumer)) {
1076       if (should_print_nir(producer)) {
1077          printf("nir_remove_unused_varyings\n");
1078          nir_print_shader(producer, stdout);
1079       }
1080       if (should_print_nir(consumer)) {
1081          printf("nir_remove_unused_varyings\n");
1082          nir_print_shader(consumer, stdout);
1083       }
1084 
1085       NIR_PASS(_, producer, nir_lower_global_vars_to_local);
1086       NIR_PASS(_, consumer, nir_lower_global_vars_to_local);
1087 
1088       /* The backend might not be able to handle indirects on
1089        * temporaries so we need to lower indirects on any of the
1090        * varyings we have demoted here.
1091        */
1092       NIR_PASS(_, producer, nir_lower_indirect_derefs,
1093                   elk_nir_no_indirect_mask(compiler, producer->info.stage),
1094                   UINT32_MAX);
1095       NIR_PASS(_, consumer, nir_lower_indirect_derefs,
1096                   elk_nir_no_indirect_mask(compiler, consumer->info.stage),
1097                   UINT32_MAX);
1098 
1099       elk_nir_optimize(producer, p_is_scalar, devinfo);
1100       elk_nir_optimize(consumer, c_is_scalar, devinfo);
1101    }
1102 
1103    NIR_PASS(_, producer, nir_lower_io_to_vector, nir_var_shader_out);
1104 
1105    if (producer->info.stage == MESA_SHADER_TESS_CTRL &&
1106        producer->options->vectorize_tess_levels)
1107    NIR_PASS_V(producer, nir_vectorize_tess_levels);
1108 
1109    NIR_PASS(_, producer, nir_opt_combine_stores, nir_var_shader_out);
1110    NIR_PASS(_, consumer, nir_lower_io_to_vector, nir_var_shader_in);
1111 
1112    if (producer->info.stage != MESA_SHADER_TESS_CTRL) {
1113       /* Calling lower_io_to_vector creates output variable writes with
1114        * write-masks.  On non-TCS outputs, the back-end can't handle it and we
1115        * need to call nir_lower_io_to_temporaries to get rid of them.  This,
1116        * in turn, creates temporary variables and extra copy_deref intrinsics
1117        * that we need to clean up.
1118        */
1119       NIR_PASS_V(producer, nir_lower_io_to_temporaries,
1120                  nir_shader_get_entrypoint(producer), true, false);
1121       NIR_PASS(_, producer, nir_lower_global_vars_to_local);
1122       NIR_PASS(_, producer, nir_split_var_copies);
1123       NIR_PASS(_, producer, nir_lower_var_copies);
1124    }
1125 }
1126 
1127 bool
elk_nir_should_vectorize_mem(unsigned align_mul,unsigned align_offset,unsigned bit_size,unsigned num_components,nir_intrinsic_instr * low,nir_intrinsic_instr * high,void * data)1128 elk_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
1129                              unsigned bit_size,
1130                              unsigned num_components,
1131                              nir_intrinsic_instr *low,
1132                              nir_intrinsic_instr *high,
1133                              void *data)
1134 {
1135    /* Don't combine things to generate 64-bit loads/stores.  We have to split
1136     * those back into 32-bit ones anyway and UBO loads aren't split in NIR so
1137     * we don't want to make a mess for the back-end.
1138     */
1139    if (bit_size > 32)
1140       return false;
1141 
1142    if (low->intrinsic == nir_intrinsic_load_ubo_uniform_block_intel ||
1143        low->intrinsic == nir_intrinsic_load_ssbo_uniform_block_intel ||
1144        low->intrinsic == nir_intrinsic_load_shared_uniform_block_intel ||
1145        low->intrinsic == nir_intrinsic_load_global_constant_uniform_block_intel) {
1146       if (num_components > 4) {
1147          if (!util_is_power_of_two_nonzero(num_components))
1148             return false;
1149 
1150          if (bit_size != 32)
1151             return false;
1152 
1153          if (num_components > 32)
1154             return false;
1155       }
1156    } else {
1157       /* We can handle at most a vec4 right now.  Anything bigger would get
1158        * immediately split by elk_nir_lower_mem_access_bit_sizes anyway.
1159        */
1160       if (num_components > 4)
1161          return false;
1162    }
1163 
1164 
1165    uint32_t align;
1166    if (align_offset)
1167       align = 1 << (ffs(align_offset) - 1);
1168    else
1169       align = align_mul;
1170 
1171    if (align < bit_size / 8)
1172       return false;
1173 
1174    return true;
1175 }
1176 
1177 static
combine_all_memory_barriers(nir_intrinsic_instr * a,nir_intrinsic_instr * b,void * data)1178 bool combine_all_memory_barriers(nir_intrinsic_instr *a,
1179                                  nir_intrinsic_instr *b,
1180                                  void *data)
1181 {
1182    /* Combine control barriers with identical memory semantics. This prevents
1183     * the second barrier generating a spurious, identical fence message as the
1184     * first barrier.
1185     */
1186    if (nir_intrinsic_memory_modes(a) == nir_intrinsic_memory_modes(b) &&
1187        nir_intrinsic_memory_semantics(a) == nir_intrinsic_memory_semantics(b) &&
1188        nir_intrinsic_memory_scope(a) == nir_intrinsic_memory_scope(b)) {
1189       nir_intrinsic_set_execution_scope(a, MAX2(nir_intrinsic_execution_scope(a),
1190                                                 nir_intrinsic_execution_scope(b)));
1191       return true;
1192    }
1193 
1194    /* Only combine pure memory barriers */
1195    if ((nir_intrinsic_execution_scope(a) != SCOPE_NONE) ||
1196        (nir_intrinsic_execution_scope(b) != SCOPE_NONE))
1197       return false;
1198 
1199    /* Translation to backend IR will get rid of modes we don't care about, so
1200     * no harm in always combining them.
1201     *
1202     * TODO: While HW has only ACQUIRE|RELEASE fences, we could improve the
1203     * scheduling so that it can take advantage of the different semantics.
1204     */
1205    nir_intrinsic_set_memory_modes(a, nir_intrinsic_memory_modes(a) |
1206                                      nir_intrinsic_memory_modes(b));
1207    nir_intrinsic_set_memory_semantics(a, nir_intrinsic_memory_semantics(a) |
1208                                          nir_intrinsic_memory_semantics(b));
1209    nir_intrinsic_set_memory_scope(a, MAX2(nir_intrinsic_memory_scope(a),
1210                                           nir_intrinsic_memory_scope(b)));
1211    return true;
1212 }
1213 
1214 static nir_mem_access_size_align
get_mem_access_size_align(nir_intrinsic_op intrin,uint8_t bytes,uint8_t bit_size,uint32_t align_mul,uint32_t align_offset,bool offset_is_const,const void * cb_data)1215 get_mem_access_size_align(nir_intrinsic_op intrin, uint8_t bytes,
1216                           uint8_t bit_size, uint32_t align_mul, uint32_t align_offset,
1217                           bool offset_is_const, const void *cb_data)
1218 {
1219    const uint32_t align = nir_combined_align(align_mul, align_offset);
1220 
1221    switch (intrin) {
1222    case nir_intrinsic_load_ssbo:
1223    case nir_intrinsic_load_shared:
1224    case nir_intrinsic_load_scratch:
1225       /* The offset is constant so we can use a 32-bit load and just shift it
1226        * around as needed.
1227        */
1228       if (align < 4 && offset_is_const) {
1229          assert(util_is_power_of_two_nonzero(align_mul) && align_mul >= 4);
1230          const unsigned pad = align_offset % 4;
1231          const unsigned comps32 = MIN2(DIV_ROUND_UP(bytes + pad, 4), 4);
1232          return (nir_mem_access_size_align) {
1233             .bit_size = 32,
1234             .num_components = comps32,
1235             .align = 4,
1236          };
1237       }
1238       break;
1239 
1240    default:
1241       break;
1242    }
1243 
1244    const bool is_load = nir_intrinsic_infos[intrin].has_dest;
1245    const bool is_scratch = intrin == nir_intrinsic_load_scratch ||
1246                            intrin == nir_intrinsic_store_scratch;
1247 
1248    if (align < 4 || bytes < 4) {
1249       /* Choose a byte, word, or dword */
1250       bytes = MIN2(bytes, 4);
1251       if (bytes == 3)
1252          bytes = is_load ? 4 : 2;
1253 
1254       if (is_scratch) {
1255          /* The way scratch address swizzling works in the back-end, it
1256           * happens at a DWORD granularity so we can't have a single load
1257           * or store cross a DWORD boundary.
1258           */
1259          if ((align_offset % 4) + bytes > MIN2(align_mul, 4))
1260             bytes = MIN2(align_mul, 4) - (align_offset % 4);
1261 
1262          /* Must be a power of two */
1263          if (bytes == 3)
1264             bytes = 2;
1265       }
1266 
1267       return (nir_mem_access_size_align) {
1268          .bit_size = bytes * 8,
1269          .num_components = 1,
1270          .align = 1,
1271       };
1272    } else {
1273       bytes = MIN2(bytes, 16);
1274       return (nir_mem_access_size_align) {
1275          .bit_size = 32,
1276          .num_components = is_scratch ? 1 :
1277                            is_load ? DIV_ROUND_UP(bytes, 4) : bytes / 4,
1278          .align = 4,
1279       };
1280    }
1281 }
1282 
1283 static void
elk_vectorize_lower_mem_access(nir_shader * nir,const struct elk_compiler * compiler,enum elk_robustness_flags robust_flags)1284 elk_vectorize_lower_mem_access(nir_shader *nir,
1285                                const struct elk_compiler *compiler,
1286                                enum elk_robustness_flags robust_flags)
1287 {
1288    bool progress = false;
1289    const bool is_scalar = compiler->scalar_stage[nir->info.stage];
1290 
1291    if (is_scalar) {
1292       nir_load_store_vectorize_options options = {
1293          .modes = nir_var_mem_ubo | nir_var_mem_ssbo |
1294                   nir_var_mem_global | nir_var_mem_shared,
1295          .callback = elk_nir_should_vectorize_mem,
1296          .robust_modes = (nir_variable_mode)0,
1297       };
1298 
1299       if (robust_flags & ELK_ROBUSTNESS_UBO)
1300          options.robust_modes |= nir_var_mem_ubo | nir_var_mem_global;
1301       if (robust_flags & ELK_ROBUSTNESS_SSBO)
1302          options.robust_modes |= nir_var_mem_ssbo | nir_var_mem_global;
1303 
1304       OPT(nir_opt_load_store_vectorize, &options);
1305    }
1306 
1307    nir_lower_mem_access_bit_sizes_options mem_access_options = {
1308       .modes = nir_var_mem_ssbo |
1309                nir_var_mem_constant |
1310                nir_var_shader_temp |
1311                nir_var_function_temp |
1312                nir_var_mem_global |
1313                nir_var_mem_shared,
1314       .callback = get_mem_access_size_align,
1315    };
1316    OPT(nir_lower_mem_access_bit_sizes, &mem_access_options);
1317 
1318    while (progress) {
1319       progress = false;
1320 
1321       OPT(nir_lower_pack);
1322       OPT(nir_copy_prop);
1323       OPT(nir_opt_dce);
1324       OPT(nir_opt_cse);
1325       OPT(nir_opt_algebraic);
1326       OPT(nir_opt_constant_folding);
1327    }
1328 }
1329 
1330 static bool
nir_shader_has_local_variables(const nir_shader * nir)1331 nir_shader_has_local_variables(const nir_shader *nir)
1332 {
1333    nir_foreach_function_impl(impl, nir) {
1334       if (!exec_list_is_empty(&impl->locals))
1335          return true;
1336    }
1337 
1338    return false;
1339 }
1340 
1341 /* Prepare the given shader for codegen
1342  *
1343  * This function is intended to be called right before going into the actual
1344  * backend and is highly backend-specific.  Also, once this function has been
1345  * called on a shader, it will no longer be in SSA form so most optimizations
1346  * will not work.
1347  */
1348 void
elk_postprocess_nir(nir_shader * nir,const struct elk_compiler * compiler,bool debug_enabled,enum elk_robustness_flags robust_flags)1349 elk_postprocess_nir(nir_shader *nir, const struct elk_compiler *compiler,
1350                     bool debug_enabled,
1351                     enum elk_robustness_flags robust_flags)
1352 {
1353    const struct intel_device_info *devinfo = compiler->devinfo;
1354    const bool is_scalar = compiler->scalar_stage[nir->info.stage];
1355 
1356    UNUSED bool progress; /* Written by OPT */
1357 
1358    OPT(intel_nir_lower_sparse_intrinsics);
1359 
1360    OPT(nir_lower_bit_size, lower_bit_size_callback, (void *)compiler);
1361 
1362    OPT(nir_opt_combine_barriers, combine_all_memory_barriers, NULL);
1363 
1364    do {
1365       progress = false;
1366       OPT(nir_opt_algebraic_before_ffma);
1367    } while (progress);
1368 
1369    elk_nir_optimize(nir, is_scalar, devinfo);
1370 
1371    if (is_scalar && nir_shader_has_local_variables(nir)) {
1372       OPT(nir_lower_vars_to_explicit_types, nir_var_function_temp,
1373           glsl_get_natural_size_align_bytes);
1374       OPT(nir_lower_explicit_io, nir_var_function_temp,
1375           nir_address_format_32bit_offset);
1376       elk_nir_optimize(nir, is_scalar, devinfo);
1377    }
1378 
1379    elk_vectorize_lower_mem_access(nir, compiler, robust_flags);
1380 
1381    if (OPT(nir_lower_int64))
1382       elk_nir_optimize(nir, is_scalar, devinfo);
1383 
1384    if (devinfo->ver >= 6) {
1385       /* Try to fuse multiply-adds; if successful, run shrink_vectors to
1386        * avoid peephole_ffma generating things like this:
1387        *    vec16 ssa_0 = ...
1388        *    vec16 ssa_1 = fneg ssa_0
1389        *    vec1  ssa_2 = ffma ssa_1, ...
1390        *
1391        * We want this instead:
1392        *    vec16 ssa_0 = ...
1393        *    vec1  ssa_1 = fneg ssa_0.x
1394        *    vec1  ssa_2 = ffma ssa_1, ...
1395        */
1396       if (OPT(intel_nir_opt_peephole_ffma))
1397          OPT(nir_opt_shrink_vectors, false);
1398    }
1399 
1400    if (is_scalar)
1401       OPT(intel_nir_opt_peephole_imul32x16);
1402 
1403    if (OPT(nir_opt_comparison_pre)) {
1404       OPT(nir_copy_prop);
1405       OPT(nir_opt_dce);
1406       OPT(nir_opt_cse);
1407 
1408       /* Do the select peephole again.  nir_opt_comparison_pre (combined with
1409        * the other optimization passes) will have removed at least one
1410        * instruction from one of the branches of the if-statement, so now it
1411        * might be under the threshold of conversion to bcsel.
1412        *
1413        * See elk_nir_optimize for the explanation of is_vec4_tessellation.
1414        */
1415       const bool is_vec4_tessellation = !is_scalar &&
1416          (nir->info.stage == MESA_SHADER_TESS_CTRL ||
1417           nir->info.stage == MESA_SHADER_TESS_EVAL);
1418       OPT(nir_opt_peephole_select, 0, is_vec4_tessellation, false);
1419       OPT(nir_opt_peephole_select, 1, is_vec4_tessellation,
1420           compiler->devinfo->ver >= 6);
1421    }
1422 
1423    do {
1424       progress = false;
1425       if (OPT(nir_opt_algebraic_late)) {
1426          /* At this late stage, anything that makes more constants will wreak
1427           * havoc on the vec4 backend, which does not handle
1428           * constants well.
1429           */
1430          if (is_scalar)
1431             OPT(nir_opt_constant_folding);
1432 
1433          OPT(nir_copy_prop);
1434          OPT(nir_opt_dce);
1435          OPT(nir_opt_cse);
1436       }
1437    } while (progress);
1438 
1439 
1440    if (OPT(nir_lower_fp16_casts, nir_lower_fp16_split_fp64)) {
1441       if (OPT(nir_lower_int64)) {
1442          elk_nir_optimize(nir, is_scalar, devinfo);
1443       }
1444    }
1445 
1446    OPT(intel_nir_lower_conversions);
1447 
1448    if (is_scalar)
1449       OPT(nir_lower_alu_to_scalar, NULL, NULL);
1450 
1451    while (OPT(nir_opt_algebraic_distribute_src_mods)) {
1452       if (is_scalar)
1453          OPT(nir_opt_constant_folding);
1454 
1455       OPT(nir_copy_prop);
1456       OPT(nir_opt_dce);
1457       OPT(nir_opt_cse);
1458    }
1459 
1460    OPT(nir_copy_prop);
1461    OPT(nir_opt_dce);
1462    OPT(nir_opt_move, nir_move_comparisons);
1463    OPT(nir_opt_dead_cf);
1464 
1465    bool divergence_analysis_dirty = false;
1466    NIR_PASS(_, nir, nir_convert_to_lcssa, true, true);
1467    NIR_PASS_V(nir, nir_divergence_analysis);
1468 
1469    /* TODO: Enable nir_opt_uniform_atomics on Gfx7.x too.
1470     * It currently fails Vulkan tests on Haswell for an unknown reason.
1471     */
1472    bool opt_uniform_atomic_stage_allowed = devinfo->ver >= 8;
1473 
1474    if (opt_uniform_atomic_stage_allowed && OPT(nir_opt_uniform_atomics, false)) {
1475       const nir_lower_subgroups_options subgroups_options = {
1476          .ballot_bit_size = 32,
1477          .ballot_components = 1,
1478          .lower_elect = true,
1479       };
1480       OPT(nir_lower_subgroups, &subgroups_options);
1481 
1482       if (OPT(nir_lower_int64))
1483          elk_nir_optimize(nir, is_scalar, devinfo);
1484 
1485       divergence_analysis_dirty = true;
1486    }
1487 
1488    /* Do this only after the last opt_gcm. GCM will undo this lowering. */
1489    if (nir->info.stage == MESA_SHADER_FRAGMENT) {
1490       if (divergence_analysis_dirty) {
1491          NIR_PASS(_, nir, nir_convert_to_lcssa, true, true);
1492          NIR_PASS_V(nir, nir_divergence_analysis);
1493       }
1494 
1495       OPT(intel_nir_lower_non_uniform_barycentric_at_sample);
1496    }
1497 
1498    /* Clean up LCSSA phis */
1499    OPT(nir_opt_remove_phis);
1500 
1501    OPT(nir_lower_bool_to_int32);
1502    OPT(nir_copy_prop);
1503    OPT(nir_opt_dce);
1504 
1505    OPT(nir_lower_locals_to_regs, 32);
1506 
1507    if (unlikely(debug_enabled)) {
1508       /* Re-index SSA defs so we print more sensible numbers. */
1509       nir_foreach_function_impl(impl, nir) {
1510          nir_index_ssa_defs(impl);
1511       }
1512 
1513       fprintf(stderr, "NIR (SSA form) for %s shader:\n",
1514               _mesa_shader_stage_to_string(nir->info.stage));
1515       nir_print_shader(nir, stderr);
1516    }
1517 
1518    nir_validate_ssa_dominance(nir, "before nir_convert_from_ssa");
1519 
1520    /* Rerun the divergence analysis before convert_from_ssa, as that pass
1521     * asserts on consistent divergence flags.
1522     */
1523    NIR_PASS(_, nir, nir_convert_to_lcssa, true, true);
1524    NIR_PASS_V(nir, nir_divergence_analysis);
1525 
1526    OPT(nir_convert_from_ssa, true);
1527 
1528    if (!is_scalar) {
1529       OPT(nir_move_vec_src_uses_to_dest, true);
1530       OPT(nir_lower_vec_to_regs, NULL, NULL);
1531    }
1532 
1533    OPT(nir_opt_dce);
1534 
1535    if (OPT(nir_opt_rematerialize_compares))
1536       OPT(nir_opt_dce);
1537 
1538    nir_trivialize_registers(nir);
1539 
1540    /* This is the last pass we run before we start emitting stuff.  It
1541     * determines when we need to insert boolean resolves on Gen <= 5.  We
1542     * run it last because it stashes data in instr->pass_flags and we don't
1543     * want that to be squashed by other NIR passes.
1544     */
1545    if (devinfo->ver <= 5)
1546       elk_nir_analyze_boolean_resolves(nir);
1547 
1548    nir_sweep(nir);
1549 
1550    if (unlikely(debug_enabled)) {
1551       fprintf(stderr, "NIR (final form) for %s shader:\n",
1552               _mesa_shader_stage_to_string(nir->info.stage));
1553       nir_print_shader(nir, stderr);
1554    }
1555 }
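/* A minimal usage sketch (hypothetical driver code, not part of this file):
 * once elk_preprocess_nir() and elk_nir_apply_key() have run, a driver would
 * finish its NIR processing with something like
 *
 *    elk_postprocess_nir(nir, compiler, INTEL_DEBUG(DEBUG_VS),
 *                        ELK_ROBUSTNESS_UBO | ELK_ROBUSTNESS_SSBO);
 *
 * after which the shader has left SSA form and is handed to the back-end
 * code generator.
 */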
1556 
1557 static bool
1558 elk_nir_apply_sampler_key(nir_shader *nir,
1559                           const struct elk_compiler *compiler,
1560                           const struct elk_sampler_prog_key_data *key_tex)
1561 {
1562    const struct intel_device_info *devinfo = compiler->devinfo;
1563    nir_lower_tex_options tex_options = {
1564       .lower_txd_clamp_bindless_sampler = true,
1565       .lower_txd_clamp_if_sampler_index_not_lt_16 = true,
1566       .lower_invalid_implicit_lod = true,
1567       .lower_index_to_offset = true,
1568    };
1569 
1570    /* Iron Lake and prior require lowering of all rectangle textures */
1571    if (devinfo->ver < 6)
1572       tex_options.lower_rect = true;
1573 
1574    /* Prior to Broadwell, our hardware can't actually do GL_CLAMP */
1575    if (devinfo->ver < 8) {
1576       tex_options.saturate_s = key_tex->gl_clamp_mask[0];
1577       tex_options.saturate_t = key_tex->gl_clamp_mask[1];
1578       tex_options.saturate_r = key_tex->gl_clamp_mask[2];
1579    }
1580 
1581    /* Prior to Haswell, we have to lower gradients on shadow samplers */
1582    tex_options.lower_txd_shadow = devinfo->verx10 <= 70;
1583 
1584    return nir_lower_tex(nir, &tex_options);
1585 }
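/* Example (hypothetical sampler key): on pre-Broadwell hardware, a key with
 * gl_clamp_mask[0] = 0x1 means texture unit 0 uses GL_CLAMP on its S
 * coordinate, so nir_lower_tex saturates that coordinate in the shader
 * rather than relying on sampler state the hardware cannot express.
 */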
1586 
1587 static unsigned
1588 get_subgroup_size(const struct shader_info *info, unsigned max_subgroup_size)
1589 {
1590    switch (info->subgroup_size) {
1591    case SUBGROUP_SIZE_API_CONSTANT:
1592       /* We have to use the global constant size. */
1593       return ELK_SUBGROUP_SIZE;
1594 
1595    case SUBGROUP_SIZE_UNIFORM:
1596       /* It has to be uniform across all invocations but can vary per stage
1597        * if we want.  This gives us a bit more freedom.
1598        *
1599        * For compute, elk_nir_apply_key is called per-dispatch-width so this
1600        * is the actual subgroup size and not a maximum.  However, we only
1601        * invoke one size of any given compute shader so it's still guaranteed
1602        * to be uniform across invocations.
1603        */
1604       return max_subgroup_size;
1605 
1606    case SUBGROUP_SIZE_VARYING:
1607       /* The subgroup size is allowed to be fully varying.  For geometry
1608        * stages, we know it's always 8, which is max_subgroup_size, so we can
1609        * return that.  For compute, elk_nir_apply_key is called once per
1610        * dispatch-width so max_subgroup_size is the real subgroup size.
1611        *
1612        * For fragment, we return 0 and let it fall through to the back-end
1613        * compiler.  This means we can't optimize based on subgroup size but
1614        * that's a risk the client took when it asked for a varying subgroup
1615        * size.
1616        */
1617       return info->stage == MESA_SHADER_FRAGMENT ? 0 : max_subgroup_size;
1618 
1619    case SUBGROUP_SIZE_REQUIRE_4:
1620       unreachable("Unsupported subgroup size type");
1621 
1622    case SUBGROUP_SIZE_REQUIRE_8:
1623    case SUBGROUP_SIZE_REQUIRE_16:
1624    case SUBGROUP_SIZE_REQUIRE_32:
1625       assert(gl_shader_stage_uses_workgroup(info->stage) ||
1626              (info->stage >= MESA_SHADER_RAYGEN && info->stage <= MESA_SHADER_CALLABLE));
1627       /* These enum values are expressly chosen to be equal to the subgroup
1628        * size that they require.
1629        */
1630       return info->subgroup_size;
1631 
1632    case SUBGROUP_SIZE_FULL_SUBGROUPS:
1633    case SUBGROUP_SIZE_REQUIRE_64:
1634    case SUBGROUP_SIZE_REQUIRE_128:
1635       break;
1636    }
1637 
1638    unreachable("Invalid subgroup size type");
1639 }
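/* Illustration of the cases above (hypothetical values): a compute shader
 * with SUBGROUP_SIZE_UNIFORM compiled at SIMD16 sees max_subgroup_size == 16
 * and reports 16, while a fragment shader with SUBGROUP_SIZE_VARYING reports
 * 0 and leaves the choice of dispatch width to the back-end.
 */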
1640 
1641 unsigned
1642 elk_nir_api_subgroup_size(const nir_shader *nir,
1643                           unsigned hw_subgroup_size)
1644 {
1645    return get_subgroup_size(&nir->info, hw_subgroup_size);
1646 }
1647 
1648 void
1649 elk_nir_apply_key(nir_shader *nir,
1650                   const struct elk_compiler *compiler,
1651                   const struct elk_base_prog_key *key,
1652                   unsigned max_subgroup_size)
1653 {
1654    bool progress = false;
1655 
1656    OPT(elk_nir_apply_sampler_key, compiler, &key->tex);
1657 
1658    const struct intel_nir_lower_texture_opts tex_opts = {0};
1659    OPT(intel_nir_lower_texture, &tex_opts);
1660 
1661    const nir_lower_subgroups_options subgroups_options = {
1662       .subgroup_size = get_subgroup_size(&nir->info, max_subgroup_size),
1663       .ballot_bit_size = 32,
1664       .ballot_components = 1,
1665       .lower_subgroup_masks = true,
1666    };
1667    OPT(nir_lower_subgroups, &subgroups_options);
1668 
1669    if (key->limit_trig_input_range)
1670       OPT(elk_nir_limit_trig_input_range_workaround);
1671 
1672    if (progress) {
1673       const bool is_scalar = compiler->scalar_stage[nir->info.stage];
1674       elk_nir_optimize(nir, is_scalar, compiler->devinfo);
1675    }
1676 }
1677 
1678 enum elk_conditional_mod
1679 elk_cmod_for_nir_comparison(nir_op op)
1680 {
1681    switch (op) {
1682    case nir_op_flt:
1683    case nir_op_flt32:
1684    case nir_op_ilt:
1685    case nir_op_ilt32:
1686    case nir_op_ult:
1687    case nir_op_ult32:
1688       return ELK_CONDITIONAL_L;
1689 
1690    case nir_op_fge:
1691    case nir_op_fge32:
1692    case nir_op_ige:
1693    case nir_op_ige32:
1694    case nir_op_uge:
1695    case nir_op_uge32:
1696       return ELK_CONDITIONAL_GE;
1697 
1698    case nir_op_feq:
1699    case nir_op_feq32:
1700    case nir_op_ieq:
1701    case nir_op_ieq32:
1702    case nir_op_b32all_fequal2:
1703    case nir_op_b32all_iequal2:
1704    case nir_op_b32all_fequal3:
1705    case nir_op_b32all_iequal3:
1706    case nir_op_b32all_fequal4:
1707    case nir_op_b32all_iequal4:
1708       return ELK_CONDITIONAL_Z;
1709 
1710    case nir_op_fneu:
1711    case nir_op_fneu32:
1712    case nir_op_ine:
1713    case nir_op_ine32:
1714    case nir_op_b32any_fnequal2:
1715    case nir_op_b32any_inequal2:
1716    case nir_op_b32any_fnequal3:
1717    case nir_op_b32any_inequal3:
1718    case nir_op_b32any_fnequal4:
1719    case nir_op_b32any_inequal4:
1720       return ELK_CONDITIONAL_NZ;
1721 
1722    default:
1723       unreachable("Unsupported NIR comparison op");
1724    }
1725 }
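/* Usage sketch (hypothetical back-end code): for a NIR comparison such as
 * "ssa_2 = flt ssa_0, ssa_1", a back-end can emit a CMP instruction whose
 * conditional modifier is elk_cmod_for_nir_comparison(nir_op_flt), i.e.
 * ELK_CONDITIONAL_L, and let the hardware produce the boolean result.
 */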
1726 
1727 enum elk_lsc_opcode
1728 elk_lsc_aop_for_nir_intrinsic(const nir_intrinsic_instr *atomic)
1729 {
1730    switch (nir_intrinsic_atomic_op(atomic)) {
1731    case nir_atomic_op_iadd: {
1732       unsigned src_idx;
1733       switch (atomic->intrinsic) {
1734       case nir_intrinsic_image_atomic:
1735       case nir_intrinsic_bindless_image_atomic:
1736          src_idx = 3;
1737          break;
1738       case nir_intrinsic_ssbo_atomic:
1739          src_idx = 2;
1740          break;
1741       case nir_intrinsic_shared_atomic:
1742       case nir_intrinsic_global_atomic:
1743          src_idx = 1;
1744          break;
1745       default:
1746          unreachable("Invalid add atomic opcode");
1747       }
1748 
1749       if (nir_src_is_const(atomic->src[src_idx])) {
1750          int64_t add_val = nir_src_as_int(atomic->src[src_idx]);
1751          if (add_val == 1)
1752             return LSC_OP_ATOMIC_INC;
1753          else if (add_val == -1)
1754             return LSC_OP_ATOMIC_DEC;
1755       }
1756       return LSC_OP_ATOMIC_ADD;
1757    }
1758 
1759    case nir_atomic_op_imin: return LSC_OP_ATOMIC_MIN;
1760    case nir_atomic_op_umin: return LSC_OP_ATOMIC_UMIN;
1761    case nir_atomic_op_imax: return LSC_OP_ATOMIC_MAX;
1762    case nir_atomic_op_umax: return LSC_OP_ATOMIC_UMAX;
1763    case nir_atomic_op_iand: return LSC_OP_ATOMIC_AND;
1764    case nir_atomic_op_ior:  return LSC_OP_ATOMIC_OR;
1765    case nir_atomic_op_ixor: return LSC_OP_ATOMIC_XOR;
1766    case nir_atomic_op_xchg: return LSC_OP_ATOMIC_STORE;
1767    case nir_atomic_op_cmpxchg: return LSC_OP_ATOMIC_CMPXCHG;
1768 
1769    case nir_atomic_op_fmin: return LSC_OP_ATOMIC_FMIN;
1770    case nir_atomic_op_fmax: return LSC_OP_ATOMIC_FMAX;
1771    case nir_atomic_op_fcmpxchg: return LSC_OP_ATOMIC_FCMPXCHG;
1772    case nir_atomic_op_fadd: return LSC_OP_ATOMIC_FADD;
1773 
1774    default:
1775       unreachable("Unsupported NIR atomic intrinsic");
1776    }
1777 }
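/* Example of the iadd special case above (illustrative, not from a real
 * shader): for an ssbo_atomic with nir_atomic_op_iadd whose data operand
 * src[2] is the constant 1, this returns LSC_OP_ATOMIC_INC; a constant -1
 * gives LSC_OP_ATOMIC_DEC, and any other addend falls back to
 * LSC_OP_ATOMIC_ADD.
 */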
1778 
1779 enum elk_reg_type
1780 elk_type_for_nir_type(const struct intel_device_info *devinfo,
1781                       nir_alu_type type)
1782 {
1783    switch (type) {
1784    case nir_type_uint:
1785    case nir_type_uint32:
1786       return ELK_REGISTER_TYPE_UD;
1787    case nir_type_bool:
1788    case nir_type_int:
1789    case nir_type_bool32:
1790    case nir_type_int32:
1791       return ELK_REGISTER_TYPE_D;
1792    case nir_type_float:
1793    case nir_type_float32:
1794       return ELK_REGISTER_TYPE_F;
1795    case nir_type_float16:
1796       return ELK_REGISTER_TYPE_HF;
1797    case nir_type_float64:
1798       return ELK_REGISTER_TYPE_DF;
1799    case nir_type_int64:
1800       return devinfo->ver < 8 ? ELK_REGISTER_TYPE_DF : ELK_REGISTER_TYPE_Q;
1801    case nir_type_uint64:
1802       return devinfo->ver < 8 ? ELK_REGISTER_TYPE_DF : ELK_REGISTER_TYPE_UQ;
1803    case nir_type_int16:
1804       return ELK_REGISTER_TYPE_W;
1805    case nir_type_uint16:
1806       return ELK_REGISTER_TYPE_UW;
1807    case nir_type_int8:
1808       return ELK_REGISTER_TYPE_B;
1809    case nir_type_uint8:
1810       return ELK_REGISTER_TYPE_UB;
1811    default:
1812       unreachable("unknown type");
1813    }
1814 
1815    return ELK_REGISTER_TYPE_F;
1816 }
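/* Note that 64-bit integer types map to ELK_REGISTER_TYPE_DF before Gfx8:
 * those parts have no native Q/UQ register types, so 64-bit integer values
 * are carried in 64-bit float-typed registers instead.
 */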
1817 
1818 nir_shader *
1819 elk_nir_create_passthrough_tcs(void *mem_ctx, const struct elk_compiler *compiler,
1820                                const struct elk_tcs_prog_key *key)
1821 {
1822    assert(key->input_vertices > 0);
1823 
1824    const nir_shader_compiler_options *options =
1825       compiler->nir_options[MESA_SHADER_TESS_CTRL];
1826 
1827    uint64_t inputs_read = key->outputs_written &
1828       ~(VARYING_BIT_TESS_LEVEL_INNER | VARYING_BIT_TESS_LEVEL_OUTER);
1829 
1830    unsigned locations[64];
1831    unsigned num_locations = 0;
1832 
1833    u_foreach_bit64(varying, inputs_read)
1834       locations[num_locations++] = varying;
1835 
1836    nir_shader *nir =
1837       nir_create_passthrough_tcs_impl(options, locations, num_locations,
1838                                       key->input_vertices);
1839 
1840    ralloc_steal(mem_ctx, nir);
1841 
1842    nir->info.inputs_read = inputs_read;
1843    nir->info.tess._primitive_mode = key->_tes_primitive_mode;
1844    nir_validate_shader(nir, "in elk_nir_create_passthrough_tcs");
1845 
1846    struct elk_nir_compiler_opts opts = {};
1847    elk_preprocess_nir(compiler, nir, &opts);
1848 
1849    return nir;
1850 }
1851 
1852 nir_def *
1853 elk_nir_load_global_const(nir_builder *b, nir_intrinsic_instr *load_uniform,
1854       nir_def *base_addr, unsigned off)
1855 {
1856    assert(load_uniform->intrinsic == nir_intrinsic_load_uniform);
1857 
1858    unsigned bit_size = load_uniform->def.bit_size;
1859    assert(bit_size >= 8 && bit_size % 8 == 0);
1860    unsigned byte_size = bit_size / 8;
1861    nir_def *sysval;
1862 
1863    if (nir_src_is_const(load_uniform->src[0])) {
1864       uint64_t offset = off +
1865                         nir_intrinsic_base(load_uniform) +
1866                         nir_src_as_uint(load_uniform->src[0]);
1867 
1868       /* Things should be component-aligned. */
1869       assert(offset % byte_size == 0);
1870 
1871       unsigned suboffset = offset % 64;
1872       uint64_t aligned_offset = offset - suboffset;
1873 
1874       /* Load two 64B blocks in case the range crosses a 64B boundary */
1875       nir_def *data[2];
1876       for (unsigned i = 0; i < 2; i++) {
1877          nir_def *addr = nir_iadd_imm(b, base_addr, aligned_offset + i * 64);
1878          data[i] = nir_load_global_constant_uniform_block_intel(b, 16, 32, addr);
1879       }
1880 
1881       sysval = nir_extract_bits(b, data, 2, suboffset * 8,
1882                                 load_uniform->num_components, bit_size);
1883    } else {
1884       nir_def *offset32 =
1885          nir_iadd_imm(b, load_uniform->src[0].ssa,
1886                          off + nir_intrinsic_base(load_uniform));
1887       nir_def *addr = nir_iadd(b, base_addr, nir_u2u64(b, offset32));
1888       sysval = nir_load_global_constant(b, addr, byte_size,
1889                                         load_uniform->num_components, bit_size);
1890    }
1891 
1892    return sysval;
1893 }
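/* Worked example for the constant-offset path (hypothetical numbers): a
 * 4-component, 32-bit load_uniform whose total offset is 72 bytes gives
 * aligned_offset = 64 and suboffset = 8, so the two 64B blocks at
 * base_addr + 64 and base_addr + 128 are loaded and 4 x 32 bits are
 * extracted starting at bit 64 of that 128-byte pair.
 */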
1894 
1895 const struct glsl_type *
1896 elk_nir_get_var_type(const struct nir_shader *nir, nir_variable *var)
1897 {
1898    const struct glsl_type *type = var->interface_type;
1899    if (!type) {
1900       type = var->type;
1901       if (nir_is_arrayed_io(var, nir->info.stage) || var->data.per_view) {
1902          assert(glsl_type_is_array(type));
1903          type = glsl_get_array_element(type);
1904       }
1905    }
1906 
1907    return type;
1908 }
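/* For instance (hypothetical declaration): a tessellation-control-shader
 * input declared as "in vec4 foo[gl_MaxPatchVertices]" is arrayed per-vertex
 * I/O, so the per-vertex element type vec4 is returned rather than the outer
 * array type.
 */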
1909 
1910