/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <math.h>
#include "nir.h"
#include "nir_builder.h"
#include "nir_constant_expressions.h"
#include "nir_deref.h"

/*
 * Implements SSA-based constant folding.
 */

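/* Tracks which load_constant intrinsics were seen so the pass can decide at
 * the end whether shader->constant_data is still needed.
 */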
struct constant_fold_state {
   bool has_load_constant;
   bool has_indirect_load_const;
};

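/* Fold an ALU instruction whose sources are all load_const into a single
 * immediate that replaces its destination.
 */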
static bool
try_fold_alu(nir_builder *b, nir_alu_instr *alu)
{
   nir_const_value src[NIR_ALU_MAX_INPUTS][NIR_MAX_VEC_COMPONENTS];

   /* If any outputs/inputs have unsized types, we need to guess the
    * bit-size. The validator ensures that all bit-sizes match, so we can
    * take the bit-size from the first output/input with an unsized type. If
    * all outputs/inputs are sized, we don't need to guess at all: the code
    * generated for constant opcodes already knows the sizes of the types
    * involved and doesn't use the provided bit-size for anything (although
    * it still requires a valid bit-size to be passed in).
    */
   unsigned bit_size = 0;
   if (!nir_alu_type_get_type_size(nir_op_infos[alu->op].output_type))
      bit_size = alu->def.bit_size;

   for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
      if (bit_size == 0 &&
          !nir_alu_type_get_type_size(nir_op_infos[alu->op].input_types[i]))
         bit_size = alu->src[i].src.ssa->bit_size;

      nir_instr *src_instr = alu->src[i].src.ssa->parent_instr;

      if (src_instr->type != nir_instr_type_load_const)
         return false;
      nir_load_const_instr *load_const = nir_instr_as_load_const(src_instr);

      for (unsigned j = 0; j < nir_ssa_alu_instr_src_components(alu, i);
           j++) {
         src[i][j] = load_const->value[alu->src[i].swizzle[j]];
      }
   }

   if (bit_size == 0)
      bit_size = 32;

   nir_const_value dest[NIR_MAX_VEC_COMPONENTS];
   nir_const_value *srcs[NIR_ALU_MAX_INPUTS];
   memset(dest, 0, sizeof(dest));
   for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; ++i)
      srcs[i] = src[i];
   nir_eval_const_opcode(alu->op, dest, alu->def.num_components,
                         bit_size, srcs,
                         b->shader->info.float_controls_execution_mode);

   b->cursor = nir_before_instr(&alu->instr);
   nir_def *imm = nir_build_imm(b, alu->def.num_components,
                                alu->def.bit_size,
                                dest);
   nir_def_replace(&alu->def, imm);
   nir_instr_free(&alu->instr);

   return true;
}

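/* Chase a deref chain into a nir_var_mem_constant variable and return the
 * nir_const_value vector it resolves to, or NULL if it cannot be resolved to
 * a constant initializer.
 */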
static nir_const_value *
const_value_for_deref(nir_deref_instr *deref)
{
   if (!nir_deref_mode_is(deref, nir_var_mem_constant))
      return NULL;

   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);
   if (path.path[0]->deref_type != nir_deref_type_var)
      goto fail;

   nir_variable *var = path.path[0]->var;
   assert(var->data.mode == nir_var_mem_constant);
   if (var->constant_initializer == NULL)
      goto fail;

   if (var->constant_initializer->is_null_constant) {
      /* Doesn't matter what casts are in the way, it's all zeros */
      nir_deref_path_finish(&path);
      return var->constant_initializer->values;
   }

   nir_constant *c = var->constant_initializer;
   nir_const_value *v = NULL; /* Vector value for array-deref-of-vec */

   for (unsigned i = 1; path.path[i] != NULL; i++) {
      nir_deref_instr *p = path.path[i];
      switch (p->deref_type) {
      case nir_deref_type_var:
         unreachable("Deref paths can only start with a var deref");

      case nir_deref_type_array: {
         assert(v == NULL);
         if (!nir_src_is_const(p->arr.index))
            goto fail;

         uint64_t idx = nir_src_as_uint(p->arr.index);
         if (c->num_elements > 0) {
            assert(glsl_type_is_array(path.path[i - 1]->type));
            if (idx >= c->num_elements)
               goto fail;
            c = c->elements[idx];
         } else {
            assert(glsl_type_is_vector(path.path[i - 1]->type));
            assert(glsl_type_is_scalar(p->type));
            if (idx >= NIR_MAX_VEC_COMPONENTS)
               goto fail;
            v = &c->values[idx];
         }
         break;
      }

      case nir_deref_type_struct:
         assert(glsl_type_is_struct(path.path[i - 1]->type));
         assert(v == NULL && c->num_elements > 0);
         if (p->strct.index >= c->num_elements)
            goto fail;
         c = c->elements[p->strct.index];
         break;

      default:
         goto fail;
      }
   }

   /* We have to have ended at a vector */
   assert(c->num_elements == 0);
   nir_deref_path_finish(&path);
   return v ? v : c->values;

fail:
   nir_deref_path_finish(&path);
   return NULL;
}

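/* Fold intrinsics whose result or behavior is fully determined by constant
 * sources: conditional demote/terminate, loads from constant memory,
 * derivatives of constants, and subgroup operations on uniform constant data.
 */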
static bool
try_fold_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
                   struct constant_fold_state *state)
{
   switch (intrin->intrinsic) {
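   /* With a constant condition, demote_if/terminate_if either becomes an
    * unconditional demote/terminate (condition true) or goes away entirely
    * (condition false).
    */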
   case nir_intrinsic_demote_if:
   case nir_intrinsic_terminate_if:
      if (nir_src_is_const(intrin->src[0])) {
         if (nir_src_as_bool(intrin->src[0])) {
            b->cursor = nir_before_instr(&intrin->instr);
            nir_intrinsic_op op;
            switch (intrin->intrinsic) {
            case nir_intrinsic_demote_if:
               op = nir_intrinsic_demote;
               break;
            case nir_intrinsic_terminate_if:
               op = nir_intrinsic_terminate;
               break;
            default:
               unreachable("invalid intrinsic");
            }
            nir_intrinsic_instr *new_instr =
               nir_intrinsic_instr_create(b->shader, op);
            nir_builder_instr_insert(b, &new_instr->instr);
         }
         nir_instr_remove(&intrin->instr);
         return true;
      }
      return false;

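   /* A load_deref from a constant-initialized nir_var_mem_constant variable
    * folds to the initializer value.
    */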
   case nir_intrinsic_load_deref: {
      nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
      nir_const_value *v = const_value_for_deref(deref);
      if (v) {
         b->cursor = nir_before_instr(&intrin->instr);
         nir_def *val = nir_build_imm(b, intrin->def.num_components,
                                      intrin->def.bit_size, v);
         nir_def_replace(&intrin->def, val);
         return true;
      }
      return false;
   }

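   /* A load_constant with a constant offset reads straight from
    * shader->constant_data; an out-of-range offset folds to an undef.
    */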
   case nir_intrinsic_load_constant: {
      state->has_load_constant = true;

      if (!nir_src_is_const(intrin->src[0])) {
         state->has_indirect_load_const = true;
         return false;
      }

      unsigned offset = nir_src_as_uint(intrin->src[0]);
      unsigned base = nir_intrinsic_base(intrin);
      unsigned range = nir_intrinsic_range(intrin);
      assert(base + range <= b->shader->constant_data_size);

      b->cursor = nir_before_instr(&intrin->instr);
      nir_def *val;
      if (offset >= range) {
         val = nir_undef(b, intrin->def.num_components,
                         intrin->def.bit_size);
      } else {
         nir_const_value imm[NIR_MAX_VEC_COMPONENTS];
         memset(imm, 0, sizeof(imm));
         uint8_t *data = (uint8_t *)b->shader->constant_data + base;
         for (unsigned i = 0; i < intrin->num_components; i++) {
            unsigned bytes = intrin->def.bit_size / 8;
            bytes = MIN2(bytes, range - offset);

            memcpy(&imm[i].u64, data + offset, bytes);
            offset += bytes;
         }
         val = nir_build_imm(b, intrin->def.num_components,
                             intrin->def.bit_size, imm);
      }
      nir_def_replace(&intrin->def, val);
      return true;
   }

   case nir_intrinsic_ddx:
   case nir_intrinsic_ddx_fine:
   case nir_intrinsic_ddx_coarse:
   case nir_intrinsic_ddy:
   case nir_intrinsic_ddy_fine:
   case nir_intrinsic_ddy_coarse: {
      if (!nir_src_is_const(intrin->src[0]))
         return false;

      /* Derivative of a constant is zero, except for NaNs and Infs */
      nir_const_value imm[NIR_MAX_VEC_COMPONENTS];
      unsigned sz = intrin->def.bit_size;

      b->cursor = nir_before_instr(&intrin->instr);

      for (unsigned i = 0; i < intrin->def.num_components; i++) {
         bool finite = isfinite(nir_src_comp_as_float(intrin->src[0], i));
         imm[i] = nir_const_value_for_float(finite ? 0 : NAN, sz);
      }

      nir_def_replace(&intrin->def,
                      nir_build_imm(b, intrin->def.num_components, sz, imm));
      return true;
   }

   case nir_intrinsic_vote_any:
   case nir_intrinsic_vote_all:
   case nir_intrinsic_read_invocation:
   case nir_intrinsic_read_first_invocation:
   case nir_intrinsic_as_uniform:
   case nir_intrinsic_shuffle:
   case nir_intrinsic_shuffle_xor:
   case nir_intrinsic_shuffle_up:
   case nir_intrinsic_shuffle_down:
   case nir_intrinsic_quad_broadcast:
   case nir_intrinsic_quad_swap_horizontal:
   case nir_intrinsic_quad_swap_vertical:
   case nir_intrinsic_quad_swap_diagonal:
   case nir_intrinsic_quad_swizzle_amd:
   case nir_intrinsic_masked_swizzle_amd:
      /* All of these have the data payload in the first source.  They may
       * have a second source with a shuffle index but that doesn't matter if
       * the data is constant.
       */
      if (nir_src_is_const(intrin->src[0])) {
         nir_def_replace(&intrin->def, intrin->src[0].ssa);
         return true;
      }
      return false;

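   /* If the compared value is constant, every invocation sees the same
    * value, so the vote is trivially true.
    */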
   case nir_intrinsic_vote_feq:
   case nir_intrinsic_vote_ieq:
      if (nir_src_is_const(intrin->src[0])) {
         b->cursor = nir_before_instr(&intrin->instr);
         nir_def_replace(&intrin->def, nir_imm_true(b));
         return true;
      }
      return false;

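   /* An inverse_ballot of an all-ones or all-zeros constant ballot folds to
    * constant true or false, respectively.
    */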
   case nir_intrinsic_inverse_ballot: {
      if (!nir_src_is_const(intrin->src[0]))
         return false;
      bool constant_true = true;
      bool constant_false = true;
      for (unsigned i = 0; i < nir_src_num_components(intrin->src[0]); i++) {
         int64_t value = nir_src_comp_as_int(intrin->src[0], i);
         constant_true &= value == -1;
         constant_false &= value == 0;
      }
      if (!constant_true && !constant_false)
         return false;
      b->cursor = nir_before_instr(&intrin->instr);
      nir_def_replace(&intrin->def, nir_imm_bool(b, constant_true));
      return true;
   }

   default:
      return false;
   }
}

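/* A txb (texture sample with LOD bias) whose bias is a constant 0.0 is
 * equivalent to a plain tex.
 */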
static bool
try_fold_txb_to_tex(nir_builder *b, nir_tex_instr *tex)
{
   assert(tex->op == nir_texop_txb);

   const int bias_idx = nir_tex_instr_src_index(tex, nir_tex_src_bias);

   /* nir_to_tgsi_lower_tex mangles many kinds of texture instructions,
    * including txb, into invalid states.  It removes the special
    * parameters and appends the values to the texture coordinate.
    */
   if (bias_idx < 0)
      return false;

   if (nir_src_is_const(tex->src[bias_idx].src) &&
       nir_src_as_float(tex->src[bias_idx].src) == 0.0) {
      nir_tex_instr_remove_src(tex, bias_idx);
      tex->op = nir_texop_tex;
      return true;
   }

   return false;
}

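/* Fold a constant texture_offset/sampler_offset source into the
 * instruction's texture_index/sampler_index and drop the source.
 */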
static bool
try_fold_tex_offset(nir_tex_instr *tex, unsigned *index,
                    nir_tex_src_type src_type)
{
   const int src_idx = nir_tex_instr_src_index(tex, src_type);
   if (src_idx < 0)
      return false;

   if (!nir_src_is_const(tex->src[src_idx].src))
      return false;

   *index += nir_src_as_uint(tex->src[src_idx].src);
   nir_tex_instr_remove_src(tex, src_idx);

   return true;
}

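/* Drop a texel offset source whose components are all constant zero. */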
static bool
try_fold_texel_offset_src(nir_tex_instr *tex)
{
   int offset_src = nir_tex_instr_src_index(tex, nir_tex_src_offset);
   if (offset_src < 0)
      return false;

   unsigned size = nir_tex_instr_src_size(tex, offset_src);
   nir_tex_src *src = &tex->src[offset_src];

   for (unsigned i = 0; i < size; i++) {
      nir_scalar comp = nir_scalar_resolved(src->src.ssa, i);
      if (!nir_scalar_is_const(comp) || nir_scalar_as_uint(comp) != 0)
         return false;
   }

   nir_tex_instr_remove_src(tex, offset_src);

   return true;
}

static bool
try_fold_tex(nir_builder *b, nir_tex_instr *tex)
{
   bool progress = false;

   progress |= try_fold_tex_offset(tex, &tex->texture_index,
                                   nir_tex_src_texture_offset);
   progress |= try_fold_tex_offset(tex, &tex->sampler_index,
                                   nir_tex_src_sampler_offset);

   /* txb with a bias of constant zero is just tex. */
   if (tex->op == nir_texop_txb)
      progress |= try_fold_txb_to_tex(b, tex);

   /* tex with a zero offset is just tex. */
   progress |= try_fold_texel_offset_src(tex);

   return progress;
}

static bool
try_fold_instr(nir_builder *b, nir_instr *instr, void *_state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      return try_fold_alu(b, nir_instr_as_alu(instr));
   case nir_instr_type_intrinsic:
      return try_fold_intrinsic(b, nir_instr_as_intrinsic(instr), _state);
   case nir_instr_type_tex:
      return try_fold_tex(b, nir_instr_as_tex(instr));
   default:
      /* Don't know how to constant fold */
      return false;
   }
}

bool
nir_opt_constant_folding(nir_shader *shader)
{
   struct constant_fold_state state;
   state.has_load_constant = false;
   state.has_indirect_load_const = false;

   bool progress = nir_shader_instructions_pass(shader, try_fold_instr,
                                                nir_metadata_control_flow,
                                                &state);

   /* Don't free the constant data if no load_constant intrinsics were seen:
    * the data might still be used, but the loads may have already been
    * lowered to load_ubo.
    */
   if (state.has_load_constant && !state.has_indirect_load_const &&
       shader->constant_data_size) {
      ralloc_free(shader->constant_data);
      shader->constant_data = NULL;
      shader->constant_data_size = 0;
   }

   return progress;
}