/*
 * Copyright (C) 2005-2007 Brian Paul All Rights Reserved.
 * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
 * Copyright © 2014 Intel Corporation
 * Copyright © 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file
 *
 * Lower sampler and image references of (non-bindless) uniforms by removing
 * struct dereferences, and synthesizing new uniform variables without structs
 * if required.
 *
 * This allows backends to have a simple, uniform treatment of bindless and
 * non-bindless samplers and images.
 *
 * Example:
 *
 *   struct S {
 *      sampler2D tex[2];
 *      sampler2D other;
 *   };
 *   uniform S s[2];
 *
 *   tmp = texture(s[n].tex[m], coord);
 *
 * Becomes:
 *
 *   decl_var uniform INTERP_MODE_NONE sampler2D[2][2] lower@s.tex (...)
 *
 *   vec1 32 ssa_idx = $(2 * n + m)
 *   vec4 32 ssa_out = tex ssa_coord (coord), lower@s.tex[n][m] (texture), lower@s.tex[n][m] (sampler)
 *
 * and lower@s.tex has var->data.binding set to the base index as defined by
 * the opaque uniform mapping.
 */

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_deref.h"
#include "gl_nir.h"
#include "ir_uniform.h"

#include "util/compiler.h"
#include "main/shader_types.h"

struct lower_samplers_as_deref_state {
   nir_shader *shader;
   const struct gl_shader_program *shader_program;
   /* Maps the synthesized (struct-free) variable name to its new
    * nir_variable, so repeated derefs of the same member reuse one var.
    */
   struct hash_table *remap_table;
};

/* Prepare for removing struct derefs.  This pre-pass generates the name
 * of the lowered deref, and calculates the lowered type and location.
 * After that, once the lowered var has been looked up (or created if
 * needed), constructing the new chain of deref instructions is a simple
 * loop that skips the struct derefs.
 *
 * name:     appended to as we descend down the chain of deref instrs
 *           and remove struct derefs
 * location: increased as we descend down and remove struct derefs
 * type:     updated as we recurse back up the chain of deref instrs
 *           with the resulting type after removing struct derefs
 */
static void
remove_struct_derefs_prep(nir_deref_instr **p, char **name,
                          unsigned *location, const struct glsl_type **type)
{
   nir_deref_instr *cur = p[0], *next = p[1];

   if (!next) {
      *type = cur->type;
      return;
   }

   switch (next->deref_type) {
   case nir_deref_type_array: {
      unsigned length = glsl_get_length(cur->type);

      remove_struct_derefs_prep(&p[1], name, location, type);

      *type = glsl_array_type(*type, length, glsl_get_explicit_stride(cur->type));
      break;
   }

   case nir_deref_type_struct: {
      *location += glsl_get_struct_location_offset(cur->type, next->strct.index);
      ralloc_asprintf_append(name, ".%s",
                             glsl_get_struct_elem_name(cur->type, next->strct.index));

      remove_struct_derefs_prep(&p[1], name, location, type);
      break;
   }

   default:
      unreachable("Invalid deref type");
      break;
   }
}
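
/* Illustration (an informal sketch, not part of the pass): running this
 * prep pass over the deref chain for the file-header example
 *
 *    s[n].tex[m]   with   uniform S s[2];  struct S { sampler2D tex[2]; ... };
 *
 * appends ".tex" to the name (yielding "lower@s.tex"), adds the struct
 * member's location offset to *location, and rebuilds the type on the way
 * back up the recursion as sampler2D[2][2]: the struct layer is gone, but
 * every enclosing array layer is preserved.
 */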

static void
record_images_used(struct shader_info *info,
                   nir_intrinsic_instr *instr)
{
   nir_variable *var = nir_intrinsic_get_var(instr, 0);

   /* Structs have been lowered already, so get_aoa_size is sufficient. */
   const unsigned size =
      glsl_type_is_array(var->type) ? glsl_get_aoa_size(var->type) : 1;

   BITSET_SET_RANGE(info->images_used, var->data.binding,
                    var->data.binding + (MAX2(size, 1) - 1));

   enum glsl_sampler_dim sampler_dim =
      glsl_get_sampler_dim(glsl_without_array(var->type));
   if (sampler_dim == GLSL_SAMPLER_DIM_BUF) {
      BITSET_SET_RANGE(info->image_buffers, var->data.binding,
                       var->data.binding + (MAX2(size, 1) - 1));
   }
   if (sampler_dim == GLSL_SAMPLER_DIM_MS) {
      BITSET_SET_RANGE(info->msaa_images, var->data.binding,
                       var->data.binding + (MAX2(size, 1) - 1));
   }
}
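
/* Illustration (assuming the inclusive [start, end] semantics of
 * BITSET_SET_RANGE): for "uniform image2D imgs[3]" with
 * var->data.binding == 2, glsl_get_aoa_size() returns 3, so the ranges
 * above mark bindings 2..4 as used.
 */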

static nir_deref_instr *
lower_deref(nir_builder *b, struct lower_samplers_as_deref_state *state,
            nir_deref_instr *deref)
{
   nir_variable *var = nir_deref_instr_get_variable(deref);
   gl_shader_stage stage = state->shader->info.stage;

   if (!(var->data.mode & (nir_var_uniform | nir_var_image)) ||
       var->data.bindless)
      return NULL;

   nir_deref_path path;
   nir_deref_path_init(&path, deref, state->remap_table);
   assert(path.path[0]->deref_type == nir_deref_type_var);

   char *name = ralloc_asprintf(state->remap_table, "lower@%s", var->name);
   unsigned location = var->data.location;
   const struct glsl_type *type = NULL;
   unsigned binding;

   /*
    * We need two passes: the first generates the name of the lowered var
    * (and detects whether there are any struct derefs at all), and the
    * second constructs the actual deref instructions after looking up or
    * creating the new nir_variable (since the deref_var must be built
    * first).
    */

   remove_struct_derefs_prep(path.path, &name, &location, &type);

   if (state->shader_program && var->data.how_declared != nir_var_hidden) {
      /* For GLSL programs, look up the bindings in the uniform storage. */
      assert(location < state->shader_program->data->NumUniformStorage &&
             state->shader_program->data->UniformStorage[location].opaque[stage].active);

      binding = state->shader_program->data->UniformStorage[location].opaque[stage].index;
   } else {
      /* For ARB programs, built-in shaders, or internally generated sampler
       * variables in GLSL programs, assume that whoever created the shader
       * set the bindings correctly already.
       */
      assert(var->data.explicit_binding);
      binding = var->data.binding;
   }

   if (var->type == type) {
      /* Fast path: We did not encounter any struct derefs. */
      var->data.binding = binding;
      return deref;
   }

   uint32_t hash = _mesa_hash_string(name);
   struct hash_entry *h =
      _mesa_hash_table_search_pre_hashed(state->remap_table, hash, name);

   if (h) {
      var = (nir_variable *)h->data;
   } else {
      var = nir_variable_create(state->shader, var->data.mode, type, name);
      var->data.binding = binding;

      /* Don't set var->data.location.  The old structure location could be
       * used to index into gl_uniform_storage, assuming the full structure
       * was walked in order.  With the new split variables, this invariant
       * no longer holds and there's no meaningful way to start from a base
       * location and access a particular array element.  Just leave it 0.
       */

      _mesa_hash_table_insert_pre_hashed(state->remap_table, hash, name, var);
   }

   /* Construct a new deref chain based on the lowered var, skipping the
    * struct derefs from the original deref:
    */
   nir_deref_instr *new_deref = nir_build_deref_var(b, var);
   for (nir_deref_instr **p = &path.path[1]; *p; p++) {
      if ((*p)->deref_type == nir_deref_type_struct)
         continue;

      assert((*p)->deref_type == nir_deref_type_array);

      new_deref = nir_build_deref_array(b, new_deref,
                                        (*p)->arr.index.ssa);
   }

   return new_deref;
}
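
/* Illustration (an informal sketch of the rewrite above): for the
 * file-header example, the original chain
 *
 *    deref_var s -> deref_array [n] -> deref_struct .tex -> deref_array [m]
 *
 * is rebuilt on the lowered variable with the struct step dropped:
 *
 *    deref_var lower@s.tex -> deref_array [n] -> deref_array [m]
 *
 * The array index SSA values are reused unchanged; only the variable and
 * the struct hop differ.
 */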

static void
record_textures_used(struct shader_info *info,
                     nir_deref_instr *deref,
                     nir_texop op)
{
   nir_variable *var = nir_deref_instr_get_variable(deref);

   /* Structs have been lowered already, so get_aoa_size is sufficient. */
   const unsigned size =
      glsl_type_is_array(var->type) ? glsl_get_aoa_size(var->type) : 1;

   BITSET_SET_RANGE(info->textures_used, var->data.binding,
                    var->data.binding + (MAX2(size, 1) - 1));

   if (op == nir_texop_txf ||
       op == nir_texop_txf_ms ||
       op == nir_texop_txf_ms_mcs_intel) {
      BITSET_SET_RANGE(info->textures_used_by_txf, var->data.binding,
                       var->data.binding + (MAX2(size, 1) - 1));
   }
}

static void
record_samplers_used(struct shader_info *info,
                     nir_deref_instr *deref,
                     nir_texop op)
{
   nir_variable *var = nir_deref_instr_get_variable(deref);

   /* Structs have been lowered already, so get_aoa_size is sufficient. */
   const unsigned size =
      glsl_type_is_array(var->type) ? glsl_get_aoa_size(var->type) : 1;

   BITSET_SET_RANGE(info->samplers_used, var->data.binding,
                    var->data.binding + (MAX2(size, 1) - 1));
}

static bool
lower_sampler(nir_tex_instr *instr, struct lower_samplers_as_deref_state *state,
              nir_builder *b)
{
   int texture_idx =
      nir_tex_instr_src_index(instr, nir_tex_src_texture_deref);
   int sampler_idx =
      nir_tex_instr_src_index(instr, nir_tex_src_sampler_deref);

   b->cursor = nir_before_instr(&instr->instr);

   if (texture_idx >= 0) {
      nir_deref_instr *texture_deref =
         lower_deref(b, state, nir_src_as_deref(instr->src[texture_idx].src));
      /* only lower non-bindless: */
      if (texture_deref) {
         nir_src_rewrite(&instr->src[texture_idx].src, &texture_deref->def);
         record_textures_used(&b->shader->info, texture_deref, instr->op);
      }
   }

   if (sampler_idx >= 0) {
      nir_deref_instr *sampler_deref =
         lower_deref(b, state, nir_src_as_deref(instr->src[sampler_idx].src));
      /* only lower non-bindless: */
      if (sampler_deref) {
         nir_src_rewrite(&instr->src[sampler_idx].src, &sampler_deref->def);
         record_samplers_used(&b->shader->info, sampler_deref, instr->op);
      }
   }

   return true;
}
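
/* Illustration (an informal note): a GLSL combined sampler access such as
 * texture(s[n].tex[m], coord) reaches here with both a texture deref
 * source and a sampler deref source; both are rewritten to chains on the
 * same lowered variable, matching the "lower@s.tex[n][m] (texture),
 * lower@s.tex[n][m] (sampler)" result in the file-header example.
 */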

static bool
lower_intrinsic(nir_intrinsic_instr *instr,
                struct lower_samplers_as_deref_state *state,
                nir_builder *b)
{
   if (instr->intrinsic == nir_intrinsic_image_deref_load ||
       instr->intrinsic == nir_intrinsic_image_deref_store ||
       instr->intrinsic == nir_intrinsic_image_deref_atomic ||
       instr->intrinsic == nir_intrinsic_image_deref_atomic_swap ||
       instr->intrinsic == nir_intrinsic_image_deref_size ||
       instr->intrinsic == nir_intrinsic_image_deref_samples_identical ||
       instr->intrinsic == nir_intrinsic_image_deref_descriptor_amd ||
       instr->intrinsic == nir_intrinsic_image_deref_samples) {

      b->cursor = nir_before_instr(&instr->instr);
      nir_deref_instr *deref =
         lower_deref(b, state, nir_src_as_deref(instr->src[0]));

      record_images_used(&state->shader->info, instr);

      /* don't lower bindless: */
      if (!deref)
         return false;
      nir_src_rewrite(&instr->src[0], &deref->def);
      return true;
   }
   if (instr->intrinsic == nir_intrinsic_image_deref_order ||
       instr->intrinsic == nir_intrinsic_image_deref_format)
      unreachable("how did you even manage this?");

   return false;
}

static bool
lower_instr(nir_builder *b, nir_instr *instr, void *cb_data)
{
   struct lower_samplers_as_deref_state *state = cb_data;

   if (instr->type == nir_instr_type_tex)
      return lower_sampler(nir_instr_as_tex(instr), state, b);

   if (instr->type == nir_instr_type_intrinsic)
      return lower_intrinsic(nir_instr_as_intrinsic(instr), state, b);

   return false;
}

bool
gl_nir_lower_samplers_as_deref(nir_shader *shader,
                               const struct gl_shader_program *shader_program)
{
   struct lower_samplers_as_deref_state state;

   state.shader = shader;
   state.shader_program = shader_program;
   state.remap_table = _mesa_hash_table_create(NULL, _mesa_hash_string,
                                               _mesa_key_string_equal);

   bool progress = nir_shader_instructions_pass(shader, lower_instr,
                                                nir_metadata_control_flow,
                                                &state);

   if (progress) {
      nir_remove_dead_derefs(shader);
      if (!shader->info.internal && shader_program) {
         /* Try to apply bindings for unused samplers to avoid index zero
          * clobbering in backends.
          */
         nir_foreach_uniform_variable(var, shader) {
            /* ignore hidden variables */
            if (!glsl_type_is_sampler(glsl_without_array(var->type)) ||
                var->data.how_declared == nir_var_hidden)
               continue;
            bool found = false;
            hash_table_foreach(state.remap_table, entry) {
               if (var == entry->data) {
                  found = true;
                  break;
               }
            }
            if (!found) {
               /* same as lower_deref() */
               var->data.binding = shader_program->data->UniformStorage[var->data.location].opaque[shader->info.stage].index;
            }
         }
      }
   }

   /* keys are freed automatically by ralloc */
   _mesa_hash_table_destroy(state.remap_table, NULL);

   return progress;
}
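
/* Usage illustration (a minimal sketch; the exact call site and pass
 * ordering vary by driver and are an assumption here):
 *
 *    bool progress = false;
 *    NIR_PASS(progress, nir, gl_nir_lower_samplers_as_deref, shader_program);
 *
 * After the pass, var->data.binding on each (possibly synthesized)
 * sampler/image variable holds the flattened opaque-uniform index, so
 * backends can treat bindless and non-bindless resources uniformly.
 */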