/*
 * Copyright © 2020 Advanced Micro Devices, Inc.
 * Copyright © 2022 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/* Enhanced version of nir_inline_uniforms that can inline from any uniform
 * buffer; see nir_inline_uniforms.c for more details.
 */

#include "nir_builder.h"
#include "nir_loop_analyze.h"
#include "lvp_private.h"

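/* Check whether "src" is a single 32-bit, non-constant component whose value
 * can be traced back entirely to uniform-buffer loads, i.e. a value that
 * becomes a compile-time constant once those uniforms are inlined.
 */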
static bool
is_src_uniform_load(nir_src src)
{
   if (nir_src_bit_size(src) != 32 || nir_src_num_components(src) != 1 || nir_src_is_const(src))
      return false;
   return nir_collect_src_uniforms(&src, 0, NULL, NULL,
                                   PIPE_MAX_CONSTANT_BUFFERS, UINT_MAX);
}

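/* Recursively walk a control-flow node: record uniform offsets that feed
 * "if" conditions (so they can be inlined) and collect store_deref sources
 * that are pure uniform loads as additional inlining candidates.
 */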
42 static void
process_node(nir_cf_node * node,nir_loop_info * info,uint32_t * uni_offsets,uint8_t * num_offsets,struct set * stores)43 process_node(nir_cf_node *node, nir_loop_info *info,
44              uint32_t *uni_offsets, uint8_t *num_offsets,
45              struct set *stores)
46 {
   switch (node->type) {
   case nir_cf_node_if: {
      nir_if *if_node = nir_cf_node_as_if(node);
      const nir_src *cond = &if_node->condition;
      nir_add_inlinable_uniforms(cond, info, uni_offsets, num_offsets,
                                 PIPE_MAX_CONSTANT_BUFFERS, UINT_MAX);

      /* Do not pass loop info down, so induction variables are only
       * allowed in the loop terminator "if":
       *
       *     for (i = 0; true; i++)
       *         if (i == count)
       *             if (i == num)
       *                 <no break>
       *             break
       *
       * Here "num" won't be inlined because the inner "if" is not a
       * terminator.
       */
      info = NULL;

      foreach_list_typed(nir_cf_node, nested_node, node, &if_node->then_list)
         process_node(nested_node, info, uni_offsets, num_offsets, stores);
      foreach_list_typed(nir_cf_node, nested_node, node, &if_node->else_list)
         process_node(nested_node, info, uni_offsets, num_offsets, stores);
      break;
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);

      /* Replace the loop info; nested loop info is not tracked currently:
       *
       *     for (i = 0; i < count0; i++)
       *         for (j = 0; j < count1; j++)
       *             if (i == num)
       *
       * Here "num" won't be inlined because "i" is an induction
       * variable of the outer loop.
       */
      info = loop->info;

      foreach_list_typed(nir_cf_node, nested_node, node, &loop->body) {
         bool is_terminator = false;
         list_for_each_entry(nir_loop_terminator, terminator,
                             &info->loop_terminator_list,
                             loop_terminator_link) {
            if (nested_node == &terminator->nif->cf_node) {
               is_terminator = true;
               break;
            }
         }

         /* Allow induction variables for the terminator "if" only:
          *
          *     for (i = 0; i < count; i++)
          *         if (i == num)
          *             <no break>
          *
          * Here "num" won't be inlined because the "if" is not a
          * terminator.
          */
         nir_loop_info *use_info = is_terminator ? info : NULL;
         process_node(nested_node, use_info, uni_offsets, num_offsets, stores);
      }
      break;
   }

   case nir_cf_node_block: {
      nir_block *block = nir_cf_node_as_block(node);
      nir_foreach_instr(instr, block) {
         if (instr->type == nir_instr_type_intrinsic) {
            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            if (intr->intrinsic == nir_intrinsic_store_deref && is_src_uniform_load(intr->src[1]))
               _mesa_set_add(stores, &intr->src[1]);
         }
      }
      break;
   }
   default:
      break;
   }
}

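/* Scan the shader for uniform values worth inlining and record their per-UBO
 * dword offsets in shader->inlines.  Candidates come from "if" conditions
 * (via process_node) and from uniform-loaded values that are stored through
 * derefs and reused several times.
 */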
bool
lvp_find_inlinable_uniforms(struct lvp_shader *shader, nir_shader *nir)
{
   bool ret = false;
   struct set *stores = _mesa_set_create(nir, _mesa_hash_pointer, _mesa_key_pointer_equal);
   nir_foreach_function_impl(impl, nir) {
      nir_metadata_require(impl, nir_metadata_loop_analysis, nir_var_all);

      foreach_list_typed(nir_cf_node, node, node, &impl->body)
         process_node(node, NULL, (uint32_t*)shader->inlines.uniform_offsets, shader->inlines.count, stores);
   }
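   /* Heuristic: a uniform-derived value that is stored through a deref and
    * has at least "threshold" uses is also considered worth inlining.
    */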
   const unsigned threshold = 5;
   set_foreach(stores, entry) {
      const nir_src *src = entry->key;
      unsigned counter = 0;
      list_for_each_entry(nir_src, rsrc, &src->ssa->uses, use_link) {
         counter++;
         if (counter >= threshold)
            break;
      }
      if (counter >= threshold) {
         uint8_t new_num[PIPE_MAX_CONSTANT_BUFFERS];
         memcpy(new_num, shader->inlines.count, sizeof(new_num));

         uint32_t *uni_offsets =
            (uint32_t *) shader->inlines.uniform_offsets;

         if (nir_collect_src_uniforms(src, 0, uni_offsets, new_num,
                                      PIPE_MAX_CONSTANT_BUFFERS, UINT_MAX)) {
            ret = true;
            memcpy(shader->inlines.count, new_num, sizeof(new_num));
         }
      }
   }
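   /* Flag the shader as inlinable once any UBO has recorded uniform offsets. */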
   for (unsigned i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
      if (shader->inlines.count[i]) {
         shader->inlines.can_inline |= BITFIELD_BIT(i);
         break;
      }
   }
   return ret;
}

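/* Rewrite load_ubo intrinsics that read binding "ubo" at offsets recorded by
 * lvp_find_inlinable_uniforms, replacing them with the constants passed in
 * "uniform_values".  Vector loads are split per component so that only the
 * known components become constants; the rest remain scalar UBO loads.
 */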
void
lvp_inline_uniforms(nir_shader *nir, const struct lvp_shader *shader, const uint32_t *uniform_values, uint32_t ubo)
{
   if (!shader->inlines.can_inline)
      return;

   nir_foreach_function_impl(impl, nir) {
      nir_builder b = nir_builder_create(impl);
      nir_foreach_block(block, impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

            /* Only replace loads with constant offsets. */
            if (intr->intrinsic == nir_intrinsic_load_ubo &&
                nir_src_is_const(intr->src[0]) &&
                nir_src_as_uint(intr->src[0]) == ubo &&
                nir_src_is_const(intr->src[1]) &&
                /* TODO: Can't handle other bit sizes for now. */
                intr->def.bit_size == 32) {
               int num_components = intr->def.num_components;
               uint32_t offset = nir_src_as_uint(intr->src[1]);
               const unsigned num_uniforms = shader->inlines.count[ubo];
               const unsigned *uniform_dw_offsets = shader->inlines.uniform_offsets[ubo];

               if (num_components == 1) {
                  /* Just replace the uniform load with a constant load. */
                  for (unsigned i = 0; i < num_uniforms; i++) {
                     if (offset == uniform_dw_offsets[i]) {
                        b.cursor = nir_before_instr(&intr->instr);
                        nir_def *def = nir_imm_int(&b, uniform_values[i]);
                        nir_def_replace(&intr->def, def);
                        break;
                     }
                  }
               } else {
                  /* Lower the vector uniform load to scalars and replace
                   * each component whose constant value was found.
                   */
                  uint32_t max_offset = offset + num_components;
                  nir_def *components[NIR_MAX_VEC_COMPONENTS] = {0};
                  bool found = false;

                  b.cursor = nir_before_instr(&intr->instr);

                  /* Find components to replace. */
                  for (unsigned i = 0; i < num_uniforms; i++) {
                     uint32_t uni_offset = uniform_dw_offsets[i];
                     if (uni_offset >= offset && uni_offset < max_offset) {
                        int index = uni_offset - offset;
                        components[index] = nir_imm_int(&b, uniform_values[i]);
                        found = true;
                     }
                  }

                  if (!found)
                     continue;

                  /* Create per-component uniform loads for the components
                   * that were not inlined.
                   */
                  for (unsigned i = 0; i < num_components; i++) {
                     if (!components[i]) {
                        uint32_t scalar_offset = (offset + i) * 4;
                        components[i] = nir_load_ubo(&b, 1, intr->def.bit_size,
                                                     intr->src[0].ssa,
                                                     nir_imm_int(&b, scalar_offset));
                        nir_intrinsic_instr *load =
                           nir_instr_as_intrinsic(components[i]->parent_instr);
                        nir_intrinsic_set_align(load, NIR_ALIGN_MUL_MAX, scalar_offset);
                        nir_intrinsic_set_range_base(load, scalar_offset);
                        nir_intrinsic_set_range(load, 4);
                     }
                  }

                  /* Replace the original uniform load. */
                  nir_def_replace(&intr->def,
                                  nir_vec(&b, components, num_components));
               }
            }
         }
      }

      nir_metadata_preserve(impl, nir_metadata_control_flow);
   }
}