/*
 * Copyright © 2022 Collabora Ltd
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Gert Wollny <[email protected]>
 */

#include "nir.h"
#include "nir_builder.h"

#include "util/hash_table.h"
#include "nir_deref.h"

/* This pass splits stores to and loads from 64-bit vec3
 * and vec4 local (function_temp) variables so that they use at most
 * vec2, and it splits phi nodes accordingly.
 *
 * Arrays of vec3 and vec4 are handled directly; arrays of arrays
 * are lowered to flat arrays on the fly.
 */

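/* For example (an illustrative sketch, with NIR's textual syntax
 * abbreviated): a load such as
 *
 *    vec4 64 %5 = @load_deref (%4)     // %4 derefs a dvec4 function_temp
 *
 * is rewritten into two loads from cloned dvec2 variables whose results
 * are re-assembled into the original vector:
 *
 *    vec2 64 %6 = @load_deref (%xy)
 *    vec2 64 %7 = @load_deref (%zw)
 *    vec4 64 %8 = vec4 %6.x, %6.y, %7.x, %7.y
 */
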
static bool
nir_split_64bit_vec3_and_vec4_filter(const nir_instr *instr,
                                     const void *data)
{
   switch (instr->type) {
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      switch (intr->intrinsic) {
      case nir_intrinsic_load_deref: {
         if (intr->def.bit_size != 64)
            return false;
         nir_variable *var = nir_intrinsic_get_var(intr, 0);
         if (var->data.mode != nir_var_function_temp)
            return false;
         return intr->def.num_components >= 3;
      }
      case nir_intrinsic_store_deref: {
         if (nir_src_bit_size(intr->src[1]) != 64)
            return false;
         nir_variable *var = nir_intrinsic_get_var(intr, 0);
         if (var->data.mode != nir_var_function_temp)
            return false;
         return nir_src_num_components(intr->src[1]) >= 3;
      }
      default:
         return false;
      }
   }
   case nir_instr_type_phi: {
      nir_phi_instr *phi = nir_instr_as_phi(instr);
      if (phi->def.bit_size != 64)
         return false;
      return phi->def.num_components >= 3;
   }

   default:
      return false;
   }
}

typedef struct {
   nir_variable *xy;
   nir_variable *zw;
} variable_pair;

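/* Merge the xy part and the z or zw part back into a single vec3 or vec4
 * value. */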
static nir_def *
merge_to_vec3_or_vec4(nir_builder *b, nir_def *load1,
                      nir_def *load2)
{
   assert(load2->num_components > 0 && load2->num_components < 3);

   if (load2->num_components == 1)
      return nir_vec3(b, nir_channel(b, load1, 0),
                      nir_channel(b, load1, 1),
                      nir_channel(b, load2, 0));
   else
      return nir_vec4(b, nir_channel(b, load1, 0),
                      nir_channel(b, load1, 1),
                      nir_channel(b, load2, 0),
                      nir_channel(b, load2, 1));
}

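/* Collapse a chain of array derefs into a single linear index. At each
 * level, the array length of the element type (when the element is itself
 * an array) serves as the stride; at the innermost level the index is
 * added directly. */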
static nir_def *
get_linear_array_offset(nir_builder *b, nir_deref_instr *deref)
{
   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);

   nir_def *offset = nir_imm_intN_t(b, 0, deref->def.bit_size);
   for (nir_deref_instr **p = &path.path[1]; *p; p++) {
      switch ((*p)->deref_type) {
      case nir_deref_type_array: {
         nir_def *index = (*p)->arr.index.ssa;
         int stride = glsl_array_size((*p)->type);
         if (stride >= 0)
            offset = nir_iadd(b, offset, nir_amul_imm(b, index, stride));
         else
            offset = nir_iadd(b, offset, index);
         break;
      }
      default:
         unreachable("Not part of the path");
      }
   }
   nir_deref_path_finish(&path);
   return offset;
}

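/* Return the pair of variables that replaces 'old_var', creating and
 * caching it on first use: an xy clone typed dvec2 and a zw clone holding
 * the remaining one or two components. Arrays and matrices are flattened
 * into one flat array with an element per array element and matrix
 * column. */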
static variable_pair *
get_var_pair(nir_builder *b, nir_variable *old_var,
             struct hash_table *split_vars)
{
   variable_pair *new_var = NULL;
   unsigned old_components = glsl_get_components(
      glsl_without_array_or_matrix(old_var->type));

   assert(old_components > 2 && old_components <= 4);

   struct hash_entry *entry = _mesa_hash_table_search(split_vars, old_var);
   if (!entry) {
      new_var = (variable_pair *)calloc(1, sizeof(variable_pair));
      new_var->xy = nir_variable_clone(old_var, b->shader);
      new_var->zw = nir_variable_clone(old_var, b->shader);
      new_var->xy->type = glsl_dvec_type(2);
      new_var->zw->type = glsl_dvec_type(old_components - 2);

      if (glsl_type_is_array_or_matrix(old_var->type)) {
         const struct glsl_type *element_type = glsl_without_array(old_var->type);
         unsigned array_size =
            glsl_get_aoa_size(old_var->type) * glsl_get_matrix_columns(element_type);
         new_var->xy->type = glsl_array_type(new_var->xy->type,
                                             array_size, 0);
         new_var->zw->type = glsl_array_type(new_var->zw->type,
                                             array_size, 0);
      }

      exec_list_push_tail(&b->impl->locals, &new_var->xy->node);
      exec_list_push_tail(&b->impl->locals, &new_var->zw->node);

      _mesa_hash_table_insert(split_vars, old_var, new_var);
   } else
      new_var = (variable_pair *)entry->data;
   return new_var;
}

/* Delete callback for the split_vars table: the variable_pair structs
 * allocated in get_var_pair above are owned by the table, so free them
 * when the table is destroyed. */
static void
free_pair_entry(struct hash_entry *entry)
{
   free(entry->data);
}

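/* Replace a 64-bit vec3/vec4 load_deref with a vec2 load and a one- or
 * two-component load from the split variable pair, then merge the two
 * results. 'offset' is the linearized array index, or NULL when loading
 * straight from a variable. */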
static nir_def *
split_load_deref(nir_builder *b, nir_intrinsic_instr *intr,
                 nir_def *offset, struct hash_table *split_vars)
{
   nir_variable *old_var = nir_intrinsic_get_var(intr, 0);
   unsigned old_components = glsl_get_components(
      glsl_without_array_or_matrix(old_var->type));

   variable_pair *vars = get_var_pair(b, old_var, split_vars);

   nir_deref_instr *deref1 = nir_build_deref_var(b, vars->xy);
   nir_deref_instr *deref2 = nir_build_deref_var(b, vars->zw);

   if (offset) {
      deref1 = nir_build_deref_array(b, deref1, offset);
      deref2 = nir_build_deref_array(b, deref2, offset);
   }

   nir_def *load1 = nir_build_load_deref(b, 2, 64, &deref1->def, 0);
   nir_def *load2 = nir_build_load_deref(b, old_components - 2, 64, &deref2->def, 0);

   return merge_to_vec3_or_vec4(b, load1, load2);
}

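/* Replace a 64-bit vec3/vec4 store_deref with up to two stores to the
 * split variable pair; the original write mask is split so that masked-out
 * components remain untouched. */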
static nir_def *
split_store_deref(nir_builder *b, nir_intrinsic_instr *intr,
                  nir_def *offset, struct hash_table *split_vars)
{
   nir_variable *old_var = nir_intrinsic_get_var(intr, 0);

   variable_pair *vars = get_var_pair(b, old_var, split_vars);

   nir_deref_instr *deref_xy = nir_build_deref_var(b, vars->xy);
   nir_deref_instr *deref_zw = nir_build_deref_var(b, vars->zw);

   if (offset) {
      deref_xy = nir_build_deref_array(b, deref_xy, offset);
      deref_zw = nir_build_deref_array(b, deref_zw, offset);
   }

   int write_mask_xy = nir_intrinsic_write_mask(intr) & 3;
   if (write_mask_xy) {
      nir_def *src_xy = nir_trim_vector(b, intr->src[1].ssa, 2);
      nir_build_store_deref(b, &deref_xy->def, src_xy, write_mask_xy);
   }

   int write_mask_zw = nir_intrinsic_write_mask(intr) & 0xc;
   if (write_mask_zw) {
      nir_def *src_zw = nir_channels(b, intr->src[1].ssa,
                                     nir_component_mask(intr->src[1].ssa->num_components) & 0xc);
      nir_build_store_deref(b, &deref_zw->def, src_zw, write_mask_zw >> 2);
   }

   return NIR_LOWER_INSTR_PROGRESS_REPLACE;
}

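/* Split a 64-bit vec3/vec4 phi into an xy phi and a z/zw phi. The channel
 * extractions for the new sources are inserted at the end of each
 * predecessor block, and the two new phi results are merged back into a
 * vec3/vec4 right after the original phi. */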
static nir_def *
split_phi(nir_builder *b, nir_phi_instr *phi)
{
   int num_comp[2] = { 2, phi->def.num_components - 2 };

   nir_phi_instr *new_phi[2];

   for (unsigned i = 0; i < 2; i++) {
      new_phi[i] = nir_phi_instr_create(b->shader);
      nir_def_init(&new_phi[i]->instr, &new_phi[i]->def, num_comp[i],
                   phi->def.bit_size);

      nir_foreach_phi_src(src, phi) {
         /* Insert the new source at the end of the predecessor block, but
          * before any jump instruction (inspired by nir_lower_phi_to_scalar). */
         nir_instr *pred_last_instr = nir_block_last_instr(src->pred);

         if (pred_last_instr && pred_last_instr->type == nir_instr_type_jump)
            b->cursor = nir_before_instr(pred_last_instr);
         else
            b->cursor = nir_after_block(src->pred);

         nir_def *new_src = nir_channels(b, src->src.ssa,
                                         ((1 << num_comp[i]) - 1) << (2 * i));

         nir_phi_instr_add_src(new_phi[i], src->pred, new_src);
      }
      nir_instr_insert_before(&phi->instr, &new_phi[i]->instr);
   }

   b->cursor = nir_after_instr(&phi->instr);
   return merge_to_vec3_or_vec4(b, &new_phi[0]->def, &new_phi[1]->def);
}

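/* Lowering callback for nir_shader_lower_instructions: dispatches to the
 * load, store, and phi splitting helpers. 'd' is the hash table of split
 * variable pairs shared across the whole shader. */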
static nir_def *
nir_split_64bit_vec3_and_vec4_impl(nir_builder *b, nir_instr *instr, void *d)
{
   struct hash_table *split_vars = (struct hash_table *)d;

   switch (instr->type) {
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      switch (intr->intrinsic) {
      case nir_intrinsic_load_deref: {
         nir_deref_instr *deref =
            nir_instr_as_deref(intr->src[0].ssa->parent_instr);
         if (deref->deref_type == nir_deref_type_var)
            return split_load_deref(b, intr, NULL, split_vars);
         else if (deref->deref_type == nir_deref_type_array)
            return split_load_deref(b, intr, get_linear_array_offset(b, deref), split_vars);
         else
            unreachable("Only splitting of loads from vars and arrays");
      }
      case nir_intrinsic_store_deref: {
         nir_deref_instr *deref =
            nir_instr_as_deref(intr->src[0].ssa->parent_instr);
         if (deref->deref_type == nir_deref_type_var)
            return split_store_deref(b, intr, NULL, split_vars);
         else if (deref->deref_type == nir_deref_type_array)
            return split_store_deref(b, intr, get_linear_array_offset(b, deref), split_vars);
         else
            unreachable("Only splitting of stores to vars and arrays");
      }
      default:
         unreachable("Only splitting load_deref and store_deref");
      }
   }
   case nir_instr_type_phi: {
      nir_phi_instr *phi = nir_instr_as_phi(instr);
      return split_phi(b, phi);
   }
   default:
      unreachable("Only splitting load_deref/store_deref and phi");
   }

   return NULL;
}

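/* Split 64-bit vec3/vec4 loads and stores on function_temp variables, and
 * 64-bit vec3/vec4 phis, so that only 64-bit values with at most two
 * components remain. Returns true on progress. */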
bool
nir_split_64bit_vec3_and_vec4(nir_shader *sh)
{
   struct hash_table *split_vars = _mesa_pointer_hash_table_create(NULL);

   bool progress =
      nir_shader_lower_instructions(sh,
                                    nir_split_64bit_vec3_and_vec4_filter,
                                    nir_split_64bit_vec3_and_vec4_impl,
                                    split_vars);

   /* Pass the delete callback so the variable_pair allocations made in
    * get_var_pair are freed along with the table. */
   _mesa_hash_table_destroy(split_vars, free_pair_entry);
   return progress;
}
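
/* A minimal usage sketch (illustrative; the call site below is not part of
 * this file):
 *
 *    bool progress = false;
 *    NIR_PASS(progress, shader, nir_split_64bit_vec3_and_vec4);
 *
 * Running this pass before further 64-bit lowering means later passes only
 * see 64-bit values of at most two components.
 */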