/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_deref.h"
#include "gl_nir_linker.h"
#include "compiler/glsl/ir_uniform.h" /* for gl_uniform_storage */
#include "linker_util.h"
#include "util/u_dynarray.h"
#include "util/u_math.h"
#include "main/consts_exts.h"
#include "main/shader_types.h"

/**
 * This file does the common linking of GLSL uniforms, using NIR, as the
 * counterpart of the IR-based glsl/link_uniforms.cpp.
 */

#define UNMAPPED_UNIFORM_LOC ~0u

struct uniform_array_info {
   /** List of dereferences of the uniform array. */
   struct util_dynarray *deref_list;

   /** Set of bit-flags to note which array elements have been accessed. */
   BITSET_WORD *indices;
};

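/* Illustrative example (not from the original source): uniform_storage_size()
 * below counts how many gl_uniform_storage entries a type needs.  A single
 * entry can cope with one level of array of a basic type, so only structs
 * and interfaces (and arrays of them) expand.  For instance:
 *
 *    uniform struct { vec4 a; float b[4]; } s[2];
 *
 * needs 2 * (1 + 1) = 4 entries: one per leaf member, per array element.
 */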
static unsigned
uniform_storage_size(const struct glsl_type *type)
{
   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned size = 0;
      for (unsigned i = 0; i < glsl_get_length(type); i++)
         size += uniform_storage_size(glsl_get_struct_field(type, i));
      return size;
   }
   case GLSL_TYPE_ARRAY: {
      const struct glsl_type *e_type = glsl_get_array_element(type);
      enum glsl_base_type e_base_type = glsl_get_base_type(e_type);
      if (e_base_type == GLSL_TYPE_STRUCT ||
          e_base_type == GLSL_TYPE_INTERFACE ||
          e_base_type == GLSL_TYPE_ARRAY) {
         unsigned length = !glsl_type_is_unsized_array(type) ?
            glsl_get_length(type) : 1;
         return length * uniform_storage_size(e_type);
      } else
         return 1;
   }
   default:
      return 1;
   }
}

/**
 * Update the sizes of linked shader uniform arrays to the maximum
 * array index used.
 *
 * From page 81 (page 95 of the PDF) of the OpenGL 2.1 spec:
 *
 *     If one or more elements of an array are active,
 *     GetActiveUniform will return the name of the array in name,
 *     subject to the restrictions listed above. The type of the array
 *     is returned in type. The size parameter contains the highest
 *     array element index used, plus one. The compiler or linker
 *     determines the highest index used. There will be only one
 *     active uniform reported by the GL per uniform array.
 */
static void
update_array_sizes(struct gl_shader_program *prog, nir_variable *var,
                   struct hash_table **referenced_uniforms,
                   unsigned current_var_stage)
{
   /* For now we only resize 1D arrays.
    * TODO: add support for resizing more complex array types ??
    */
   if (!glsl_type_is_array(var->type) ||
       glsl_type_is_array(glsl_get_array_element(var->type)))
      return;

   /* GL_ARB_uniform_buffer_object says that std140 uniforms
    * will not be eliminated. Since we always do std140, just
    * don't resize arrays in UBOs.
    *
    * Atomic counters are supposed to get deterministic
    * locations assigned based on the declaration ordering and
    * sizes, array compaction would mess that up.
    *
    * Subroutine uniforms are not removed.
    */
   if (nir_variable_is_in_block(var) || glsl_contains_atomic(var->type) ||
       glsl_get_base_type(glsl_without_array(var->type)) == GLSL_TYPE_SUBROUTINE ||
       var->constant_initializer)
      return;

   struct uniform_array_info *ainfo = NULL;
   int words = BITSET_WORDS(glsl_array_size(var->type));
   int max_array_size = 0;
   for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
      struct gl_linked_shader *sh = prog->_LinkedShaders[stage];
      if (!sh)
         continue;

      struct hash_entry *entry =
         _mesa_hash_table_search(referenced_uniforms[stage], var->name);
      if (entry) {
         ainfo = (struct uniform_array_info *) entry->data;
         max_array_size = MAX2(BITSET_LAST_BIT_SIZED(ainfo->indices, words),
                               max_array_size);
      }

      if (max_array_size == glsl_array_size(var->type))
         return;
   }

   if (max_array_size != glsl_array_size(var->type)) {
      /* If this is a built-in uniform (i.e., it's backed by some
       * fixed-function state), adjust the number of state slots to
       * match the new array size. The number of slots per array entry
       * is not known. It seems safe to assume that the total number of
       * slots is an integer multiple of the number of array elements.
       * Determine the number of slots per array element by dividing by
       * the old (total) size.
       */
      const unsigned num_slots = var->num_state_slots;
      if (num_slots > 0) {
         var->num_state_slots =
            (max_array_size * (num_slots / glsl_array_size(var->type)));
      }

      var->type = glsl_array_type(glsl_get_array_element(var->type),
                                  max_array_size, 0);

      /* Update the types of dereferences in case we changed any. */
      struct hash_entry *entry =
         _mesa_hash_table_search(referenced_uniforms[current_var_stage], var->name);
      if (entry) {
         struct uniform_array_info *ainfo =
            (struct uniform_array_info *) entry->data;
         util_dynarray_foreach(ainfo->deref_list, nir_deref_instr *, deref) {
            (*deref)->type = var->type;
         }
      }
   }
}
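
/* Worked example (not from the original source): given
 *
 *    uniform float f[8];
 *
 * where only f[0]..f[2] is ever accessed in any stage, the bitset built by
 * add_var_use_deref() has only bits 0-2 set, so update_array_sizes() shrinks
 * the variable to float[3] and rewrites the deref types to match.
 */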

static void
nir_setup_uniform_remap_tables(const struct gl_constants *consts,
                               struct gl_shader_program *prog)
{
   unsigned total_entries = prog->NumExplicitUniformLocations;

   /* For glsl this may have been allocated by reserve_explicit_locations() so
    * that we can keep track of unused uniforms with explicit locations.
    */
   assert(!prog->data->spirv ||
          (prog->data->spirv && !prog->UniformRemapTable));
   if (!prog->UniformRemapTable) {
      prog->UniformRemapTable = rzalloc_array(prog,
                                              struct gl_uniform_storage *,
                                              prog->NumUniformRemapTable);
   }

   union gl_constant_value *data =
      rzalloc_array(prog->data,
                    union gl_constant_value, prog->data->NumUniformDataSlots);
   if (!prog->UniformRemapTable || !data) {
      linker_error(prog, "Out of memory during linking.\n");
      return;
   }
   prog->data->UniformDataSlots = data;

   prog->data->UniformDataDefaults =
      rzalloc_array(prog->data->UniformDataSlots,
                    union gl_constant_value, prog->data->NumUniformDataSlots);

   unsigned data_pos = 0;

   /* Reserve all the explicit locations of the active uniforms. */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (uniform->hidden)
         continue;

      if (uniform->is_shader_storage ||
          glsl_get_base_type(uniform->type) == GLSL_TYPE_SUBROUTINE)
         continue;

      if (prog->data->UniformStorage[i].remap_location == UNMAPPED_UNIFORM_LOC)
         continue;

      /* How many new entries for this uniform? */
      const unsigned entries = MAX2(1, uniform->array_elements);
      unsigned num_slots = glsl_get_component_slots(uniform->type);

      uniform->storage = &data[data_pos];

      /* Set remap table entries to point to the correct gl_uniform_storage. */
      for (unsigned j = 0; j < entries; j++) {
         unsigned element_loc = uniform->remap_location + j;
         prog->UniformRemapTable[element_loc] = uniform;

         data_pos += num_slots;
      }
   }

   /* Reserve locations for rest of the uniforms. */
   if (prog->data->spirv)
      link_util_update_empty_uniform_locations(prog);

   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (uniform->hidden)
         continue;

      if (uniform->is_shader_storage ||
          glsl_get_base_type(uniform->type) == GLSL_TYPE_SUBROUTINE)
         continue;

      /* Built-in uniforms should not get any location. */
      if (uniform->builtin)
         continue;

      /* Explicit ones have been set already. */
      if (uniform->remap_location != UNMAPPED_UNIFORM_LOC)
         continue;

      /* How many entries for this uniform? */
      const unsigned entries = MAX2(1, uniform->array_elements);

      /* Add new entries to the total amount for checking against
       * MAX_UNIFORM_LOCATIONS. This only applies to the default uniform
       * block (-1), because locations of uniform block entries are not
       * assignable.
       */
      if (prog->data->UniformStorage[i].block_index == -1)
         total_entries += entries;

      unsigned location =
         link_util_find_empty_block(prog, &prog->data->UniformStorage[i]);

      if (location == -1) {
         location = prog->NumUniformRemapTable;

         /* resize remap table to fit new entries */
         prog->UniformRemapTable =
            reralloc(prog,
                     prog->UniformRemapTable,
                     struct gl_uniform_storage *,
                     prog->NumUniformRemapTable + entries);
         prog->NumUniformRemapTable += entries;
      }

      /* set the base location in remap table for the uniform */
      uniform->remap_location = location;

      unsigned num_slots = glsl_get_component_slots(uniform->type);

      if (uniform->block_index == -1)
         uniform->storage = &data[data_pos];

      /* Set remap table entries to point to the correct gl_uniform_storage. */
      for (unsigned j = 0; j < entries; j++) {
         unsigned element_loc = uniform->remap_location + j;
         prog->UniformRemapTable[element_loc] = uniform;

         if (uniform->block_index == -1)
            data_pos += num_slots;
      }
   }

   /* Verify that total amount of entries for explicit and implicit locations
    * is less than MAX_UNIFORM_LOCATIONS.
    */
   if (total_entries > consts->MaxUserAssignableUniformLocations) {
      linker_error(prog, "count of uniform locations > MAX_UNIFORM_LOCATIONS"
                   " (%u > %u)", total_entries,
                   consts->MaxUserAssignableUniformLocations);
   }

   /* Reserve all the explicit locations of the active subroutine uniforms. */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (glsl_get_base_type(uniform->type) != GLSL_TYPE_SUBROUTINE)
         continue;

      if (prog->data->UniformStorage[i].remap_location == UNMAPPED_UNIFORM_LOC)
         continue;

      /* How many new entries for this uniform? */
      const unsigned entries =
         MAX2(1, prog->data->UniformStorage[i].array_elements);

      uniform->storage = &data[data_pos];

      unsigned num_slots = glsl_get_component_slots(uniform->type);
      unsigned mask = prog->data->linked_stages;
      while (mask) {
         const int j = u_bit_scan(&mask);
         struct gl_program *p = prog->_LinkedShaders[j]->Program;

         if (!prog->data->UniformStorage[i].opaque[j].active)
            continue;

         /* Set remap table entries to point to the correct
          * gl_uniform_storage.
          */
         for (unsigned k = 0; k < entries; k++) {
            unsigned element_loc =
               prog->data->UniformStorage[i].remap_location + k;
            p->sh.SubroutineUniformRemapTable[element_loc] =
               &prog->data->UniformStorage[i];

            data_pos += num_slots;
         }
      }
   }

   /* reserve subroutine locations */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (glsl_get_base_type(uniform->type) != GLSL_TYPE_SUBROUTINE)
         continue;

      if (prog->data->UniformStorage[i].remap_location !=
          UNMAPPED_UNIFORM_LOC)
         continue;

      const unsigned entries =
         MAX2(1, prog->data->UniformStorage[i].array_elements);

      uniform->storage = &data[data_pos];

      unsigned num_slots = glsl_get_component_slots(uniform->type);
      unsigned mask = prog->data->linked_stages;
      while (mask) {
         const int j = u_bit_scan(&mask);
         struct gl_program *p = prog->_LinkedShaders[j]->Program;

         if (!prog->data->UniformStorage[i].opaque[j].active)
            continue;

         p->sh.SubroutineUniformRemapTable =
            reralloc(p,
                     p->sh.SubroutineUniformRemapTable,
                     struct gl_uniform_storage *,
                     p->sh.NumSubroutineUniformRemapTable + entries);

         for (unsigned k = 0; k < entries; k++) {
            p->sh.SubroutineUniformRemapTable[p->sh.NumSubroutineUniformRemapTable + k] =
               &prog->data->UniformStorage[i];

            data_pos += num_slots;
         }
         prog->data->UniformStorage[i].remap_location =
            p->sh.NumSubroutineUniformRemapTable;
         p->sh.NumSubroutineUniformRemapTable += entries;
      }
   }

   /* assign storage to hidden uniforms */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (!uniform->hidden ||
          glsl_get_base_type(uniform->type) == GLSL_TYPE_SUBROUTINE)
         continue;

      const unsigned entries =
         MAX2(1, prog->data->UniformStorage[i].array_elements);

      uniform->storage = &data[data_pos];

      unsigned num_slots = glsl_get_component_slots(uniform->type);
      for (unsigned k = 0; k < entries; k++)
         data_pos += num_slots;
   }
}
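
/* Illustrative example (not from the original source): after this pass
 * UniformRemapTable maps every user-visible location to its backing
 * gl_uniform_storage, with array elements at consecutive locations, e.g.:
 *
 *    uniform vec4 color;       // location 0     -> storage A
 *    uniform float weights[3]; // locations 1..3 -> storage B, B, B
 *
 * so the glUniform*() paths can index the table directly by location.
 */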

static void
add_var_use_deref(nir_deref_instr *deref, struct hash_table *live,
                  struct array_deref_range **derefs, unsigned *derefs_size)
{
   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);

   deref = path.path[0];
   if (deref->deref_type != nir_deref_type_var ||
       !nir_deref_mode_is_one_of(deref, nir_var_uniform |
                                        nir_var_mem_ubo |
                                        nir_var_mem_ssbo |
                                        nir_var_image)) {
      nir_deref_path_finish(&path);
      return;
   }

   /* Number of derefs used in current processing. */
   unsigned num_derefs = 0;

   const struct glsl_type *deref_type = deref->var->type;
   nir_deref_instr **p = &path.path[1];
   for (; *p; p++) {
      if ((*p)->deref_type == nir_deref_type_array) {

         /* Skip matrix dereferences */
         if (!glsl_type_is_array(deref_type))
            break;

         if ((num_derefs + 1) * sizeof(struct array_deref_range) > *derefs_size) {
            void *ptr = reralloc_size(NULL, *derefs, *derefs_size + 4096);

            if (ptr == NULL) {
               nir_deref_path_finish(&path);
               return;
            }

            *derefs_size += 4096;
            *derefs = (struct array_deref_range *)ptr;
         }

         struct array_deref_range *dr = &(*derefs)[num_derefs];
         num_derefs++;

         dr->size = glsl_get_length(deref_type);

         if (nir_src_is_const((*p)->arr.index)) {
            dr->index = nir_src_as_uint((*p)->arr.index);
         } else {
            /* An unsized array can occur at the end of an SSBO. We can't track
             * accesses to such an array, so bail.
             */
            if (dr->size == 0) {
               nir_deref_path_finish(&path);
               return;
            }

            dr->index = dr->size;
         }

         deref_type = glsl_get_array_element(deref_type);
      } else if ((*p)->deref_type == nir_deref_type_struct) {
         /* We have reached the end of the array. */
         break;
      }
   }

   nir_deref_path_finish(&path);

   struct uniform_array_info *ainfo = NULL;

   struct hash_entry *entry =
      _mesa_hash_table_search(live, deref->var->name);
   if (!entry && glsl_type_is_array(deref->var->type)) {
      ainfo = ralloc(live, struct uniform_array_info);

      unsigned num_bits = MAX2(1, glsl_get_aoa_size(deref->var->type));
      ainfo->indices = rzalloc_array(live, BITSET_WORD, BITSET_WORDS(num_bits));

      ainfo->deref_list = ralloc(live, struct util_dynarray);
      util_dynarray_init(ainfo->deref_list, live);
   }

   if (entry)
      ainfo = (struct uniform_array_info *) entry->data;

   if (glsl_type_is_array(deref->var->type)) {
      /* Count the "depth" of the arrays-of-arrays. */
      unsigned array_depth = 0;
      for (const struct glsl_type *type = deref->var->type;
           glsl_type_is_array(type);
           type = glsl_get_array_element(type)) {
         array_depth++;
      }

      link_util_mark_array_elements_referenced(*derefs, num_derefs, array_depth,
                                               ainfo->indices);

      util_dynarray_append(ainfo->deref_list, nir_deref_instr *, deref);
   }

   assert(deref->modes == deref->var->data.mode);
   _mesa_hash_table_insert(live, deref->var->name, ainfo);
}
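
/* Worked example (not from the original source): for an access such as
 * u[2][i] on "uniform float u[4][8];", the loop above records
 * { size = 4, index = 2 } for the constant subscript and, because the inner
 * subscript is dynamic, { size = 8, index = 8 }; index == size is the
 * "any element may be accessed" marker understood by
 * link_util_mark_array_elements_referenced().
 */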

/* Iterate over the shader and collect information about uniform use */
static void
add_var_use_shader(nir_shader *shader, struct hash_table *live)
{
   /* Currently allocated buffer block of derefs. */
   struct array_deref_range *derefs = NULL;

   /* Size of the derefs buffer in bytes. */
   unsigned derefs_size = 0;

   nir_foreach_function_impl(impl, shader) {
      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type == nir_instr_type_intrinsic) {
               nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
               switch (intr->intrinsic) {
               case nir_intrinsic_atomic_counter_read_deref:
               case nir_intrinsic_atomic_counter_inc_deref:
               case nir_intrinsic_atomic_counter_pre_dec_deref:
               case nir_intrinsic_atomic_counter_post_dec_deref:
               case nir_intrinsic_atomic_counter_add_deref:
               case nir_intrinsic_atomic_counter_min_deref:
               case nir_intrinsic_atomic_counter_max_deref:
               case nir_intrinsic_atomic_counter_and_deref:
               case nir_intrinsic_atomic_counter_or_deref:
               case nir_intrinsic_atomic_counter_xor_deref:
               case nir_intrinsic_atomic_counter_exchange_deref:
               case nir_intrinsic_atomic_counter_comp_swap_deref:
               case nir_intrinsic_image_deref_load:
               case nir_intrinsic_image_deref_store:
               case nir_intrinsic_image_deref_atomic:
               case nir_intrinsic_image_deref_atomic_swap:
               case nir_intrinsic_image_deref_size:
               case nir_intrinsic_image_deref_samples:
               case nir_intrinsic_load_deref:
               case nir_intrinsic_store_deref:
                  add_var_use_deref(nir_src_as_deref(intr->src[0]), live,
                                    &derefs, &derefs_size);
                  break;

               default:
                  /* Nothing to do */
                  break;
               }
            } else if (instr->type == nir_instr_type_tex) {
               nir_tex_instr *tex_instr = nir_instr_as_tex(instr);
               int sampler_idx =
                  nir_tex_instr_src_index(tex_instr,
                                          nir_tex_src_sampler_deref);
               int texture_idx =
                  nir_tex_instr_src_index(tex_instr,
                                          nir_tex_src_texture_deref);

               if (sampler_idx >= 0) {
                  nir_deref_instr *deref =
                     nir_src_as_deref(tex_instr->src[sampler_idx].src);
                  add_var_use_deref(deref, live, &derefs, &derefs_size);
               }

               if (texture_idx >= 0) {
                  nir_deref_instr *deref =
                     nir_src_as_deref(tex_instr->src[texture_idx].src);
                  add_var_use_deref(deref, live, &derefs, &derefs_size);
               }
            }
         }
      }
   }

   ralloc_free(derefs);
}

static void
mark_stage_as_active(struct gl_uniform_storage *uniform,
                     unsigned stage)
{
   uniform->active_shader_mask |= 1 << stage;
}

/* Used to build a tree representing the glsl_type so that we can have a place
 * to store the next index for opaque types. Array types are expanded so that
 * they have a single child which is used for all elements of the array.
 * Struct types have a child for each member. The tree is walked while
 * processing a uniform so that we can recognise when an opaque type is
 * encountered a second time in order to reuse the same range of indices that
 * was reserved the first time. That way the sampler indices can be arranged
 * so that members of an array are placed sequentially even if the array is an
 * array of structs containing other opaque members.
 */
struct type_tree_entry {
   /* For opaque types, this will be the next index to use. If we haven’t
    * encountered this member yet, it will be UINT_MAX.
    */
   unsigned next_index;
   unsigned array_size;
   struct type_tree_entry *parent;
   struct type_tree_entry *next_sibling;
   struct type_tree_entry *children;
};
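
/* Illustrative example (not from the original source): for
 *
 *    uniform struct { sampler2D tex; float scale; } mats[2];
 *
 * the tree is a root entry with array_size = 2 and a single struct child,
 * which in turn has two children (tex, scale).  Both array elements share
 * the struct child, so the second visit of "tex" finds the index range
 * reserved on the first visit and the sampler units of mats[0].tex and
 * mats[1].tex end up sequential.
 */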

struct nir_link_uniforms_state {
   /* per-whole program */
   unsigned num_hidden_uniforms;
   unsigned num_values;
   unsigned max_uniform_location;

   /* per-shader stage */
   unsigned next_bindless_image_index;
   unsigned next_bindless_sampler_index;
   unsigned next_image_index;
   unsigned next_sampler_index;
   unsigned next_subroutine;
   unsigned num_shader_samplers;
   unsigned num_shader_images;
   unsigned num_shader_uniform_components;
   unsigned shader_samplers_used;
   unsigned shader_shadow_samplers;
   unsigned shader_storage_blocks_write_access;
   struct gl_program_parameter_list *params;

   /* per-variable */
   nir_variable *current_var;
   const struct glsl_type *current_ifc_type;
   int offset;
   bool var_is_in_block;
   bool set_top_level_array;
   int top_level_array_size;
   int top_level_array_stride;

   struct type_tree_entry *current_type;
   struct hash_table *referenced_uniforms[MESA_SHADER_STAGES];
   struct hash_table *uniform_hash;
};

static void
add_parameter(struct gl_uniform_storage *uniform,
              const struct gl_constants *consts,
              struct gl_shader_program *prog,
              const struct glsl_type *type,
              struct nir_link_uniforms_state *state)
{
   /* Builtin uniforms are backed by PROGRAM_STATE_VAR, so don't add them as
    * uniforms.
    */
   if (uniform->builtin)
      return;

   if (!state->params || uniform->is_shader_storage ||
       (glsl_contains_opaque(type) && !state->current_var->data.bindless))
      return;

   unsigned num_params = glsl_get_aoa_size(type);
   num_params = MAX2(num_params, 1);
   num_params *= glsl_get_matrix_columns(glsl_without_array(type));

   bool is_dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
   if (is_dual_slot)
      num_params *= 2;

   struct gl_program_parameter_list *params = state->params;
   int base_index = params->NumParameters;
   _mesa_reserve_parameter_storage(params, num_params, num_params);

   if (consts->PackedDriverUniformStorage) {
      for (unsigned i = 0; i < num_params; i++) {
         unsigned dmul = glsl_type_is_64bit(glsl_without_array(type)) ? 2 : 1;
         unsigned comps = glsl_get_vector_elements(glsl_without_array(type)) * dmul;
         if (is_dual_slot) {
            if (i & 0x1)
               comps -= 4;
            else
               comps = 4;
         }

         /* TODO: This will waste space with 1 and 3 16-bit components. */
         if (glsl_type_is_16bit(glsl_without_array(type)))
            comps = DIV_ROUND_UP(comps, 2);

         _mesa_add_parameter(params, PROGRAM_UNIFORM, uniform->name.string, comps,
                             glsl_get_gl_type(type), NULL, NULL, false);
      }
   } else {
      for (unsigned i = 0; i < num_params; i++) {
         _mesa_add_parameter(params, PROGRAM_UNIFORM, uniform->name.string, 4,
                             glsl_get_gl_type(type), NULL, NULL, true);
      }
   }

   /* Each Parameter will hold the index to the backing uniform storage.
    * This avoids relying on names to match parameters and uniform
    * storages.
    */
   for (unsigned i = 0; i < num_params; i++) {
      struct gl_program_parameter *param = &params->Parameters[base_index + i];
      param->UniformStorageIndex = uniform - prog->data->UniformStorage;
      param->MainUniformStorageIndex = state->current_var->data.location;
   }
}

static unsigned
get_next_index(struct nir_link_uniforms_state *state,
               const struct gl_uniform_storage *uniform,
               unsigned *next_index, bool *initialised)
{
   /* If we’ve already calculated an index for this member then we can just
    * offset from there.
    */
   if (state->current_type->next_index == UINT_MAX) {
      /* Otherwise we need to reserve enough indices for all of the arrays
       * enclosing this member.
       */

      unsigned array_size = 1;

      for (const struct type_tree_entry *p = state->current_type;
           p;
           p = p->parent) {
         array_size *= p->array_size;
      }

      state->current_type->next_index = *next_index;
      *next_index += array_size;
      *initialised = true;
   } else
      *initialised = false;

   unsigned index = state->current_type->next_index;

   state->current_type->next_index += MAX2(1, uniform->array_elements);

   return index;
}
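
/* Worked example (not from the original source): continuing the mats[2]
 * example above, the first visit of mats[0].tex reserves two sampler
 * indices (the product of the enclosing array sizes) starting at
 * *next_index; the later visit of mats[1].tex finds next_index already set
 * on the shared struct child and simply takes the next reserved index.
 */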

static gl_texture_index
texture_index_for_type(const struct glsl_type *type)
{
   const bool sampler_array = glsl_sampler_type_is_array(type);
   switch (glsl_get_sampler_dim(type)) {
   case GLSL_SAMPLER_DIM_1D:
      return sampler_array ? TEXTURE_1D_ARRAY_INDEX : TEXTURE_1D_INDEX;
   case GLSL_SAMPLER_DIM_2D:
      return sampler_array ? TEXTURE_2D_ARRAY_INDEX : TEXTURE_2D_INDEX;
   case GLSL_SAMPLER_DIM_3D:
      return TEXTURE_3D_INDEX;
   case GLSL_SAMPLER_DIM_CUBE:
      return sampler_array ? TEXTURE_CUBE_ARRAY_INDEX : TEXTURE_CUBE_INDEX;
   case GLSL_SAMPLER_DIM_RECT:
      return TEXTURE_RECT_INDEX;
   case GLSL_SAMPLER_DIM_BUF:
      return TEXTURE_BUFFER_INDEX;
   case GLSL_SAMPLER_DIM_EXTERNAL:
      return TEXTURE_EXTERNAL_INDEX;
   case GLSL_SAMPLER_DIM_MS:
      return sampler_array ? TEXTURE_2D_MULTISAMPLE_ARRAY_INDEX :
                             TEXTURE_2D_MULTISAMPLE_INDEX;
   default:
      assert(!"Should not get here.");
      return TEXTURE_BUFFER_INDEX;
   }
}

/* Update the uniforms info for the current shader stage */
static void
update_uniforms_shader_info(struct gl_shader_program *prog,
                            struct nir_link_uniforms_state *state,
                            struct gl_uniform_storage *uniform,
                            const struct glsl_type *type,
                            unsigned stage)
{
   unsigned values = glsl_get_component_slots(type);
   const struct glsl_type *type_no_array = glsl_without_array(type);

   if (glsl_type_is_sampler(type_no_array)) {
      bool init_idx;
      /* ARB_bindless_texture spec says:
       *
       *    "When used as shader inputs, outputs, uniform block members,
       *     or temporaries, the value of the sampler is a 64-bit unsigned
       *     integer handle and never refers to a texture image unit."
       */
      bool is_bindless = state->current_var->data.bindless || state->var_is_in_block;
      unsigned *next_index = is_bindless ?
         &state->next_bindless_sampler_index :
         &state->next_sampler_index;
      int sampler_index = get_next_index(state, uniform, next_index, &init_idx);
      struct gl_linked_shader *sh = prog->_LinkedShaders[stage];

      if (is_bindless) {
         if (init_idx) {
            sh->Program->sh.BindlessSamplers =
               rerzalloc(sh->Program, sh->Program->sh.BindlessSamplers,
                         struct gl_bindless_sampler,
                         sh->Program->sh.NumBindlessSamplers,
                         state->next_bindless_sampler_index);

            for (unsigned j = sh->Program->sh.NumBindlessSamplers;
                 j < state->next_bindless_sampler_index; j++) {
               sh->Program->sh.BindlessSamplers[j].target =
                  texture_index_for_type(type_no_array);
            }

            sh->Program->sh.NumBindlessSamplers =
               state->next_bindless_sampler_index;
         }

         if (!state->var_is_in_block)
            state->num_shader_uniform_components += values;
      } else {
         /* Samplers (bound or bindless) are counted as two components
          * as specified by ARB_bindless_texture.
          */
         state->num_shader_samplers += values / 2;

         if (init_idx) {
            const unsigned shadow = glsl_sampler_type_is_shadow(type_no_array);
            for (unsigned i = sampler_index;
                 i < MIN2(state->next_sampler_index, MAX_SAMPLERS); i++) {
               sh->Program->sh.SamplerTargets[i] =
                  texture_index_for_type(type_no_array);
               state->shader_samplers_used |= 1U << i;
               state->shader_shadow_samplers |= shadow << i;
            }
         }
      }

      uniform->opaque[stage].active = true;
      uniform->opaque[stage].index = sampler_index;
   } else if (glsl_type_is_image(type_no_array)) {
      struct gl_linked_shader *sh = prog->_LinkedShaders[stage];

      /* Set image access qualifiers */
      enum gl_access_qualifier image_access =
         state->current_var->data.access;

      int image_index;
      if (state->current_var->data.bindless) {
         image_index = state->next_bindless_image_index;
         state->next_bindless_image_index += MAX2(1, uniform->array_elements);

         sh->Program->sh.BindlessImages =
            rerzalloc(sh->Program, sh->Program->sh.BindlessImages,
                      struct gl_bindless_image,
                      sh->Program->sh.NumBindlessImages,
                      state->next_bindless_image_index);

         for (unsigned j = sh->Program->sh.NumBindlessImages;
              j < state->next_bindless_image_index; j++) {
            sh->Program->sh.BindlessImages[j].image_access = image_access;
         }

         sh->Program->sh.NumBindlessImages = state->next_bindless_image_index;

      } else {
         image_index = state->next_image_index;
         state->next_image_index += MAX2(1, uniform->array_elements);

         /* Images (bound or bindless) are counted as two components as
          * specified by ARB_bindless_texture.
          */
         state->num_shader_images += values / 2;

         for (unsigned i = image_index;
              i < MIN2(state->next_image_index, MAX_IMAGE_UNIFORMS); i++) {
            sh->Program->sh.image_access[i] = image_access;
         }
      }

      uniform->opaque[stage].active = true;
      uniform->opaque[stage].index = image_index;

      if (!uniform->is_shader_storage)
         state->num_shader_uniform_components += values;
   } else {
      if (glsl_get_base_type(type_no_array) == GLSL_TYPE_SUBROUTINE) {
         struct gl_linked_shader *sh = prog->_LinkedShaders[stage];

         uniform->opaque[stage].index = state->next_subroutine;
         uniform->opaque[stage].active = true;

         sh->Program->sh.NumSubroutineUniforms++;

         /* Increment the subroutine index by 1 for non-arrays and by the
          * number of array elements for arrays.
          */
         state->next_subroutine += MAX2(1, uniform->array_elements);
      }

      if (!state->var_is_in_block)
         state->num_shader_uniform_components += values;
   }
}
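
/* Illustrative example (not from the original source): a declaration such
 * as "uniform sampler2D s[4];" occupies 8 component slots, since samplers
 * count as two components each per ARB_bindless_texture, so the code above
 * adds values / 2 == 4 to num_shader_samplers and marks sampler units
 * sampler_index..sampler_index+3 as used.
 */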

static bool
find_and_update_named_uniform_storage(const struct gl_constants *consts,
                                      struct gl_shader_program *prog,
                                      struct nir_link_uniforms_state *state,
                                      nir_variable *var, char **name,
                                      size_t name_length,
                                      const struct glsl_type *type,
                                      unsigned stage, bool *first_element)
{
   /* gl_uniform_storage can cope with one level of array, so if the type is a
    * composite type or an array where each element occupies more than one
    * location then we need to recursively process it.
    */
   if (glsl_type_is_struct_or_ifc(type) ||
       (glsl_type_is_array(type) &&
        (glsl_type_is_array(glsl_get_array_element(type)) ||
         glsl_type_is_struct_or_ifc(glsl_get_array_element(type))))) {

      struct type_tree_entry *old_type = state->current_type;
      state->current_type = old_type->children;

      /* Shader storage block unsized arrays: add subscript [0] to variable
       * names.
       */
      unsigned length = glsl_get_length(type);
      if (glsl_type_is_unsized_array(type))
         length = 1;

      bool result = false;
      for (unsigned i = 0; i < length; i++) {
         const struct glsl_type *field_type;
         size_t new_length = name_length;

         if (glsl_type_is_struct_or_ifc(type)) {
            field_type = glsl_get_struct_field(type, i);

            /* Append '.field' to the current variable name. */
            if (name) {
               ralloc_asprintf_rewrite_tail(name, &new_length, ".%s",
                                            glsl_get_struct_elem_name(type, i));
            }
         } else {
            field_type = glsl_get_array_element(type);

            /* Append the subscript to the current variable name */
            if (name)
               ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
         }

         result = find_and_update_named_uniform_storage(consts, prog, state,
                                                        var, name, new_length,
                                                        field_type, stage,
                                                        first_element);

         if (glsl_type_is_struct_or_ifc(type))
            state->current_type = state->current_type->next_sibling;

         if (!result) {
            state->current_type = old_type;
            return false;
         }
      }

      state->current_type = old_type;

      return result;
   } else {
      struct hash_entry *entry =
         _mesa_hash_table_search(state->uniform_hash, *name);
      if (entry) {
         unsigned i = (unsigned) (intptr_t) entry->data;
         struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

         if (*first_element && !state->var_is_in_block) {
            *first_element = false;
            var->data.location = uniform - prog->data->UniformStorage;
         }

         update_uniforms_shader_info(prog, state, uniform, type, stage);

         const struct glsl_type *type_no_array = glsl_without_array(type);
         struct hash_entry *entry = prog->data->spirv ? NULL :
            _mesa_hash_table_search(state->referenced_uniforms[stage],
                                    state->current_var->name);
         if (entry != NULL ||
             glsl_get_base_type(type_no_array) == GLSL_TYPE_SUBROUTINE ||
             prog->data->spirv)
            uniform->active_shader_mask |= 1 << stage;

         if (!state->var_is_in_block)
            add_parameter(uniform, consts, prog, type, state);

         return true;
      }
   }

   return false;
}

/**
 * Finds, returns, and updates the stage info for any uniform in
 * UniformStorage defined by @var. For GLSL this is done using the name; for
 * SPIR-V this is in general done using the explicit location, except:
 *
 * * UBOs/SSBOs: as they lack an explicit location, the binding is used to
 *   locate them. That means that more than one entry in the uniform storage
 *   can be found. In that case all of them are updated, and the first entry
 *   is returned, in order to update the location of the nir variable.
 *
 * * Special uniforms, like atomic counters: they lack an explicit location,
 *   so they are skipped. They will be handled and assigned a location later.
 *
 */
static bool
find_and_update_previous_uniform_storage(const struct gl_constants *consts,
                                         struct gl_shader_program *prog,
                                         struct nir_link_uniforms_state *state,
                                         nir_variable *var, char *name,
                                         const struct glsl_type *type,
                                         unsigned stage)
{
   if (!prog->data->spirv) {
      bool first_element = true;
      char *name_tmp = ralloc_strdup(NULL, name);
      bool r = find_and_update_named_uniform_storage(consts, prog, state, var,
                                                     &name_tmp,
                                                     strlen(name_tmp), type,
                                                     stage, &first_element);
      ralloc_free(name_tmp);

      return r;
   }

   if (nir_variable_is_in_block(var)) {
      struct gl_uniform_storage *uniform = NULL;

      ASSERTED unsigned num_blks = nir_variable_is_in_ubo(var) ?
         prog->data->NumUniformBlocks :
         prog->data->NumShaderStorageBlocks;

      struct gl_uniform_block *blks = nir_variable_is_in_ubo(var) ?
         prog->data->UniformBlocks : prog->data->ShaderStorageBlocks;

      bool result = false;
      for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
         /* UniformStorage contains both variables from ubos and ssbos */
         if (prog->data->UniformStorage[i].is_shader_storage !=
             nir_variable_is_in_ssbo(var))
            continue;

         int block_index = prog->data->UniformStorage[i].block_index;
         if (block_index != -1) {
            assert(block_index < num_blks);

            if (var->data.binding == blks[block_index].Binding) {
               if (!uniform)
                  uniform = &prog->data->UniformStorage[i];
               mark_stage_as_active(&prog->data->UniformStorage[i],
                                    stage);
               result = true;
            }
         }
      }

      if (result)
         var->data.location = uniform - prog->data->UniformStorage;
      return result;
   }

   /* Beyond blocks, there are still some corner cases of uniforms without
    * location (ie: atomic counters) that would have an initial location equal
    * to -1. We just return on that case. Those uniforms will be handled
    * later.
    */
   if (var->data.location == -1)
      return false;

   /* TODO: the following search can be problematic with shaders with a lot
    * of uniforms. Would it be better to use some type of hash?
    */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      if (prog->data->UniformStorage[i].remap_location == var->data.location) {
         mark_stage_as_active(&prog->data->UniformStorage[i], stage);

         struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];
         var->data.location = uniform - prog->data->UniformStorage;
         add_parameter(uniform, consts, prog, var->type, state);
         return true;
      }
   }

   return false;
}
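
/* Illustrative example (not from the original source): in the SPIR-V path
 * block members carry no names to match on, so a block declared with
 * layout(binding = 3) is matched against every UniformStorage entry whose
 * block has Binding == 3.  A block with several members produces several
 * such entries, all sharing the block's binding, which is why more than one
 * entry can match and all of them get their stage mask updated.
 */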

static struct type_tree_entry *
build_type_tree_for_type(const struct glsl_type *type)
{
   struct type_tree_entry *entry = malloc(sizeof *entry);

   entry->array_size = 1;
   entry->next_index = UINT_MAX;
   entry->children = NULL;
   entry->next_sibling = NULL;
   entry->parent = NULL;

   if (glsl_type_is_array(type)) {
      entry->array_size = glsl_get_length(type);
      entry->children = build_type_tree_for_type(glsl_get_array_element(type));
      entry->children->parent = entry;
   } else if (glsl_type_is_struct_or_ifc(type)) {
      struct type_tree_entry *last = NULL;

      for (unsigned i = 0; i < glsl_get_length(type); i++) {
         const struct glsl_type *field_type = glsl_get_struct_field(type, i);
         struct type_tree_entry *field_entry =
            build_type_tree_for_type(field_type);

         if (last == NULL)
            entry->children = field_entry;
         else
            last->next_sibling = field_entry;

         field_entry->parent = entry;

         last = field_entry;
      }
   }

   return entry;
}

static void
free_type_tree(struct type_tree_entry *entry)
{
   struct type_tree_entry *p, *next;

   for (p = entry->children; p; p = next) {
      next = p->next_sibling;
      free_type_tree(p);
   }

   free(entry);
}

static void
hash_free_uniform_name(struct hash_entry *entry)
{
   free((void*)entry->key);
}

static void
enter_record(struct nir_link_uniforms_state *state,
             const struct gl_constants *consts,
             const struct glsl_type *type,
             bool row_major)
{
   assert(glsl_type_is_struct(type));
   if (!state->var_is_in_block)
      return;

   bool use_std430 = consts->UseSTD430AsDefaultPacking;
   const enum glsl_interface_packing packing =
      glsl_get_internal_ifc_packing(state->current_var->interface_type,
                                    use_std430);

   if (packing == GLSL_INTERFACE_PACKING_STD430)
      state->offset = align(
         state->offset, glsl_get_std430_base_alignment(type, row_major));
   else
      state->offset = align(
         state->offset, glsl_get_std140_base_alignment(type, row_major));
}

static void
leave_record(struct nir_link_uniforms_state *state,
             const struct gl_constants *consts,
             const struct glsl_type *type,
             bool row_major)
{
   assert(glsl_type_is_struct(type));
   if (!state->var_is_in_block)
      return;

   bool use_std430 = consts->UseSTD430AsDefaultPacking;
   const enum glsl_interface_packing packing =
      glsl_get_internal_ifc_packing(state->current_var->interface_type,
                                    use_std430);

   if (packing == GLSL_INTERFACE_PACKING_STD430)
      state->offset = align(
         state->offset, glsl_get_std430_base_alignment(type, row_major));
   else
      state->offset = align(
         state->offset, glsl_get_std140_base_alignment(type, row_major));
}
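
/* Illustrative example (not from the original source): under std140 a
 * struct's base alignment is rounded up to that of a vec4, so for
 *
 *    uniform B { float pad; struct { float x; } s; };
 *
 * entering "s" aligns state->offset from 4 up to 16, while std430 drops
 * that vec4 rounding; that difference is why the packing is checked here.
 */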

/**
 * Creates the necessary entries in UniformStorage for the uniform. Returns
 * the number of locations used or -1 on failure.
 */
static int
nir_link_uniform(const struct gl_constants *consts,
                 struct gl_shader_program *prog,
                 struct gl_program *stage_program,
                 gl_shader_stage stage,
                 const struct glsl_type *type,
                 unsigned index_in_parent,
                 int location,
                 struct nir_link_uniforms_state *state,
                 char **name, size_t name_length, bool row_major)
{
   struct gl_uniform_storage *uniform = NULL;

   if (state->set_top_level_array &&
       nir_variable_is_in_ssbo(state->current_var)) {
      /* Type is the top level SSBO member */
      if (glsl_type_is_array(type) &&
          (glsl_type_is_array(glsl_get_array_element(type)) ||
           glsl_type_is_struct_or_ifc(glsl_get_array_element(type)))) {
         /* Type is a top-level array (array of aggregate types) */
         state->top_level_array_size = glsl_get_length(type);
         state->top_level_array_stride = glsl_get_explicit_stride(type);
      } else {
         state->top_level_array_size = 1;
         state->top_level_array_stride = 0;
      }

      state->set_top_level_array = false;
   }

   /* gl_uniform_storage can cope with one level of array, so if the type is a
    * composite type or an array where each element occupies more than one
    * location then we need to recursively process it.
    */
   if (glsl_type_is_struct_or_ifc(type) ||
       (glsl_type_is_array(type) &&
        (glsl_type_is_array(glsl_get_array_element(type)) ||
         glsl_type_is_struct_or_ifc(glsl_get_array_element(type))))) {
      int location_count = 0;
      struct type_tree_entry *old_type = state->current_type;
      unsigned int struct_base_offset = state->offset;

      state->current_type = old_type->children;

      /* Shader storage block unsized arrays: add subscript [0] to variable
       * names.
       */
      unsigned length = glsl_get_length(type);
      if (glsl_type_is_unsized_array(type))
         length = 1;

      if (glsl_type_is_struct(type) && !prog->data->spirv)
         enter_record(state, consts, type, row_major);

      for (unsigned i = 0; i < length; i++) {
         const struct glsl_type *field_type;
         size_t new_length = name_length;
         bool field_row_major = row_major;

         if (glsl_type_is_struct_or_ifc(type)) {
            field_type = glsl_get_struct_field(type, i);
            /* Use the offset inside the struct only for variables backed by
             * a buffer object. For variables not backed by a buffer object,
             * offset is -1.
             */
            if (state->var_is_in_block) {
               if (prog->data->spirv) {
                  state->offset =
                     struct_base_offset + glsl_get_struct_field_offset(type, i);
               } else if (glsl_get_struct_field_offset(type, i) != -1 &&
                          type == state->current_ifc_type) {
                  state->offset = glsl_get_struct_field_offset(type, i);
               }

               if (glsl_type_is_interface(type))
                  state->set_top_level_array = true;
            }

            /* Append '.field' to the current variable name. */
            if (name) {
               ralloc_asprintf_rewrite_tail(name, &new_length, ".%s",
                                            glsl_get_struct_elem_name(type, i));
            }

            /* The layout of structures at the top level of the block is set
             * during parsing. For matrices contained in multiple levels of
             * structures in the block, the inner structures have no layout.
             * These cases must potentially inherit the layout from the outer
             * levels.
             */
            const enum glsl_matrix_layout matrix_layout =
               glsl_get_struct_field_data(type, i)->matrix_layout;
            if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
               field_row_major = true;
            } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
               field_row_major = false;
            }
         } else {
            field_type = glsl_get_array_element(type);

            /* Append the subscript to the current variable name */
            if (name)
               ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
         }

         int entries = nir_link_uniform(consts, prog, stage_program, stage,
                                        field_type, i, location,
                                        state, name, new_length,
                                        field_row_major);

         if (entries == -1)
            return -1;

         if (location != -1)
            location += entries;
         location_count += entries;

         if (glsl_type_is_struct_or_ifc(type))
            state->current_type = state->current_type->next_sibling;
      }

      if (glsl_type_is_struct(type) && !prog->data->spirv)
         leave_record(state, consts, type, row_major);

      state->current_type = old_type;

      return location_count;
   } else {
      /* TODO: reallocating storage is slow, we should figure out a way to
       * allocate storage up front for spirv like we do for GLSL.
       */
      if (prog->data->spirv) {
         /* Create a new uniform storage entry */
         prog->data->UniformStorage =
            reralloc(prog->data,
                     prog->data->UniformStorage,
                     struct gl_uniform_storage,
                     prog->data->NumUniformStorage + 1);
         if (!prog->data->UniformStorage) {
            linker_error(prog, "Out of memory during linking.\n");
            return -1;
         }
      }

      uniform = &prog->data->UniformStorage[prog->data->NumUniformStorage];
      prog->data->NumUniformStorage++;

      /* Initialize its members */
      memset(uniform, 0x00, sizeof(struct gl_uniform_storage));

      uniform->name.string =
         name ? ralloc_strdup(prog->data->UniformStorage, *name) : NULL;
      resource_name_updated(&uniform->name);

      const struct glsl_type *type_no_array = glsl_without_array(type);
      if (glsl_type_is_array(type)) {
         uniform->type = type_no_array;
         uniform->array_elements = glsl_get_length(type);
      } else {
         uniform->type = type;
         uniform->array_elements = 0;
      }
      uniform->top_level_array_size = state->top_level_array_size;
      uniform->top_level_array_stride = state->top_level_array_stride;

      struct hash_entry *entry = prog->data->spirv ? NULL :
         _mesa_hash_table_search(state->referenced_uniforms[stage],
                                 state->current_var->name);
      if (entry != NULL ||
          glsl_get_base_type(type_no_array) == GLSL_TYPE_SUBROUTINE ||
          prog->data->spirv)
         uniform->active_shader_mask |= 1 << stage;

      if (location >= 0) {
         /* Uniform has an explicit location */
         uniform->remap_location = location;
      } else {
         uniform->remap_location = UNMAPPED_UNIFORM_LOC;
      }

      uniform->hidden = state->current_var->data.how_declared == nir_var_hidden;
      if (uniform->hidden)
         state->num_hidden_uniforms++;

      uniform->is_shader_storage = nir_variable_is_in_ssbo(state->current_var);
      uniform->is_bindless = state->current_var->data.bindless;

      /* Set fields whose default value depends on the variable being inside
       * a block.
       *
       * From the OpenGL 4.6 spec, 7.3 Program objects:
       *
       *    "For the property ARRAY_STRIDE, ... For active variables not
       *     declared as an array of basic types, zero is written to params.
       *     For active variables not backed by a buffer object, -1 is
       *     written to params, regardless of the variable type."
       *
       *    "For the property MATRIX_STRIDE, ... For active variables not
       *     declared as a matrix or array of matrices, zero is written to
       *     params. For active variables not backed by a buffer object, -1
       *     is written to params, regardless of the variable type."
       *
       *    "For the property IS_ROW_MAJOR, ... For active variables backed
       *     by a buffer object, declared as a single matrix or array of
       *     matrices, and stored in row-major order, one is written to
       *     params. For all other active variables, zero is written to
       *     params."
       */
      uniform->array_stride = -1;
      uniform->matrix_stride = -1;
      uniform->row_major = false;

      if (state->var_is_in_block) {
         uniform->array_stride = glsl_type_is_array(type) ?
            glsl_get_explicit_stride(type) : 0;

         if (glsl_type_is_matrix(uniform->type)) {
            uniform->matrix_stride = glsl_get_explicit_stride(uniform->type);
            uniform->row_major = glsl_matrix_type_is_row_major(uniform->type);
         } else {
            uniform->matrix_stride = 0;
         }

         if (!prog->data->spirv) {
            bool use_std430 = consts->UseSTD430AsDefaultPacking;
            const enum glsl_interface_packing packing =
               glsl_get_internal_ifc_packing(state->current_var->interface_type,
                                             use_std430);

            unsigned alignment =
               glsl_get_std140_base_alignment(type, uniform->row_major);
            if (packing == GLSL_INTERFACE_PACKING_STD430) {
               alignment =
                  glsl_get_std430_base_alignment(type, uniform->row_major);
            }
            state->offset = align(state->offset, alignment);
         }
      }

      uniform->offset = state->var_is_in_block ? state->offset : -1;

      int buffer_block_index = -1;
      /* If the uniform is inside a uniform block determine its block index by
       * comparing the bindings, we can not use names.
       */
      if (state->var_is_in_block) {
         struct gl_uniform_block *blocks = nir_variable_is_in_ssbo(state->current_var) ?
            prog->data->ShaderStorageBlocks : prog->data->UniformBlocks;

         int num_blocks = nir_variable_is_in_ssbo(state->current_var) ?
            prog->data->NumShaderStorageBlocks : prog->data->NumUniformBlocks;

         if (!prog->data->spirv) {
            bool is_interface_array =
               glsl_without_array(state->current_var->type) == state->current_var->interface_type &&
               glsl_type_is_array(state->current_var->type);

            const char *ifc_name =
               glsl_get_type_name(state->current_var->interface_type);
            if (is_interface_array) {
               unsigned l = strlen(ifc_name);
               for (unsigned i = 0; i < num_blocks; i++) {
                  if (strncmp(ifc_name, blocks[i].name.string, l) == 0 &&
                      blocks[i].name.string[l] == '[') {
                     buffer_block_index = i;
                     break;
                  }
               }
            } else {
               for (unsigned i = 0; i < num_blocks; i++) {
                  if (strcmp(ifc_name, blocks[i].name.string) == 0) {
                     buffer_block_index = i;
                     break;
                  }
               }
            }

            /* Compute the next offset. */
            bool use_std430 = consts->UseSTD430AsDefaultPacking;
            const enum glsl_interface_packing packing =
               glsl_get_internal_ifc_packing(state->current_var->interface_type,
                                             use_std430);
            if (packing == GLSL_INTERFACE_PACKING_STD430)
               state->offset += glsl_get_std430_size(type, uniform->row_major);
            else
               state->offset += glsl_get_std140_size(type, uniform->row_major);
         } else {
            for (unsigned i = 0; i < num_blocks; i++) {
               if (state->current_var->data.binding == blocks[i].Binding) {
                  buffer_block_index = i;
                  break;
               }
            }

            /* Compute the next offset. */
            state->offset += glsl_get_explicit_size(type, true);
         }
         assert(buffer_block_index >= 0);
      }

      uniform->block_index = buffer_block_index;
      uniform->builtin = is_gl_identifier(uniform->name.string);
      uniform->atomic_buffer_index = -1;

      /* The following fields are for features not supported by
       * ARB_gl_spirv.
       */
      uniform->num_compatible_subroutines = 0;

      unsigned entries = MAX2(1, uniform->array_elements);
      unsigned values = glsl_get_component_slots(type);

      update_uniforms_shader_info(prog, state, uniform, type, stage);

      if (uniform->remap_location != UNMAPPED_UNIFORM_LOC &&
          state->max_uniform_location < uniform->remap_location + entries)
         state->max_uniform_location = uniform->remap_location + entries;

      if (!state->var_is_in_block)
         add_parameter(uniform, consts, prog, type, state);

      if (name) {
         _mesa_hash_table_insert(state->uniform_hash, strdup(*name),
                                 (void *) (intptr_t)
                                 (prog->data->NumUniformStorage - 1));
      }

      if (!is_gl_identifier(uniform->name.string) && !uniform->is_shader_storage &&
          !state->var_is_in_block)
         state->num_values += values;

      return MAX2(uniform->array_elements, 1);
   }
}

bool
gl_nir_link_uniforms(const struct gl_constants *consts,
                     struct gl_shader_program *prog,
                     bool fill_parameters)
{
   /* First free up any previous UniformStorage items */
   ralloc_free(prog->data->UniformStorage);
   prog->data->UniformStorage = NULL;
   prog->data->NumUniformStorage = 0;

   /* Iterate through all linked shaders */
   struct nir_link_uniforms_state state = {0,};

   if (!prog->data->spirv) {
      /* Gather information on uniform use */
      for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
         struct gl_linked_shader *sh = prog->_LinkedShaders[stage];
         if (!sh)
            continue;

         state.referenced_uniforms[stage] =
            _mesa_hash_table_create(NULL, _mesa_hash_string,
                                    _mesa_key_string_equal);

         nir_shader *nir = sh->Program->nir;
         add_var_use_shader(nir, state.referenced_uniforms[stage]);
      }

      if (!consts->DisableUniformArrayResize) {
         /* Resize uniform arrays based on the maximum array index */
         for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
            struct gl_linked_shader *sh = prog->_LinkedShaders[stage];
            if (!sh)
               continue;

            nir_foreach_gl_uniform_variable(var, sh->Program->nir)
               update_array_sizes(prog, var, state.referenced_uniforms, stage);
         }
      }
   }
1574
1575 /* Count total number of uniforms and allocate storage */
1576 unsigned storage_size = 0;
1577 if (!prog->data->spirv) {
1578 struct set *storage_counted =
1579 _mesa_set_create(NULL, _mesa_hash_string, _mesa_key_string_equal);
1580 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
1581 struct gl_linked_shader *sh = prog->_LinkedShaders[stage];
1582 if (!sh)
1583 continue;
1584
1585 nir_foreach_gl_uniform_variable(var, sh->Program->nir) {
1586 const struct glsl_type *type = var->type;
1587 const char *name = var->name;
1588 if (nir_variable_is_in_block(var) &&
1589 glsl_without_array(type) == var->interface_type) {
1590 type = glsl_without_array(var->type);
1591 name = glsl_get_type_name(type);
1592 }
1593
1594 struct set_entry *entry = _mesa_set_search(storage_counted, name);
1595 if (!entry) {
1596 storage_size += uniform_storage_size(type);
1597 _mesa_set_add(storage_counted, name);
1598 }
1599 }
1600 }
1601 _mesa_set_destroy(storage_counted, NULL);
1602
1603 prog->data->UniformStorage = rzalloc_array(prog->data,
1604 struct gl_uniform_storage,
1605 storage_size);
1606 if (!prog->data->UniformStorage) {
1607 linker_error(prog, "Out of memory while linking uniforms.\n");
1608 return false;
1609 }
1610 }
1611
   /* Iterate through all linked shaders */
   state.uniform_hash = _mesa_hash_table_create(NULL, _mesa_hash_string,
                                                _mesa_key_string_equal);

   for (unsigned shader_type = 0; shader_type < MESA_SHADER_STAGES; shader_type++) {
      struct gl_linked_shader *sh = prog->_LinkedShaders[shader_type];
      if (!sh)
         continue;

      nir_shader *nir = sh->Program->nir;
      assert(nir);

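      /* Reset the per-stage accumulators; their final values are copied
       * into sh->Program after this stage's variables have been walked.
       */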
      state.next_bindless_image_index = 0;
      state.next_bindless_sampler_index = 0;
      state.next_image_index = 0;
      state.next_sampler_index = 0;
      state.num_shader_samplers = 0;
      state.num_shader_images = 0;
      state.num_shader_uniform_components = 0;
      state.shader_storage_blocks_write_access = 0;
      state.shader_samplers_used = 0;
      state.shader_shadow_samplers = 0;
      state.params = fill_parameters ? sh->Program->Parameters : NULL;

      nir_foreach_gl_uniform_variable(var, nir) {
         state.current_var = var;
         state.current_ifc_type = NULL;
         state.offset = 0;
         state.var_is_in_block = nir_variable_is_in_block(var);
         state.set_top_level_array = false;
         state.top_level_array_size = 0;
         state.top_level_array_stride = 0;

         /*
          * From ARB_program_interface spec, issue (16):
          *
          * "RESOLVED: We will follow the default rule for enumerating block
          *  members in the OpenGL API, which is:
          *
          *  * If a variable is a member of an interface block without an
          *    instance name, it is enumerated using just the variable name.
          *
          *  * If a variable is a member of an interface block with an
          *    instance name, it is enumerated as "BlockName.Member", where
          *    "BlockName" is the name of the interface block (not the
          *    instance name) and "Member" is the name of the variable.
          *
          * For example, in the following code:
          *
          * uniform Block1 {
          *    int member1;
          * };
          * uniform Block2 {
          *    int member2;
          * } instance2;
          * uniform Block3 {
          *    int member3;
          * } instance3[2]; // uses two separate buffer bindings
          *
          * the three uniforms (if active) are enumerated as "member1",
          * "Block2.member2", and "Block3.member3"."
          *
          * Note that in the last example, with an array of UBOs, only one
          * uniform is generated. For that reason, while unrolling the
          * uniforms of a UBO, or the variables of an SSBO, we need to
          * treat arrays of instances as a single block.
          */
         char *name;
         const struct glsl_type *type = var->type;
         if (state.var_is_in_block &&
             ((!prog->data->spirv && glsl_without_array(type) == var->interface_type) ||
              (prog->data->spirv && type == var->interface_type))) {
            type = glsl_without_array(var->type);
            state.current_ifc_type = type;
            name = ralloc_strdup(NULL, glsl_get_type_name(type));
         } else {
            state.set_top_level_array = true;
            name = ralloc_strdup(NULL, var->name);
         }

         struct type_tree_entry *type_tree =
            build_type_tree_for_type(type);
         state.current_type = type_tree;

         int location = var->data.location;

         struct gl_uniform_block *blocks = NULL;
         int num_blocks = 0;
         int buffer_block_index = -1;
         bool is_interface_array = false;
         if (state.var_is_in_block) {
            /* If the uniform is inside a uniform block, determine its
             * block index by comparing the bindings; we can not use names.
             */
            blocks = nir_variable_is_in_ssbo(state.current_var) ?
               prog->data->ShaderStorageBlocks : prog->data->UniformBlocks;
            num_blocks = nir_variable_is_in_ssbo(state.current_var) ?
               prog->data->NumShaderStorageBlocks : prog->data->NumUniformBlocks;

            is_interface_array =
               glsl_without_array(state.current_var->type) == state.current_var->interface_type &&
               glsl_type_is_array(state.current_var->type);

            const char *ifc_name =
               glsl_get_type_name(state.current_var->interface_type);

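            /* For an instanced block array such as
             *
             *    uniform Block { ... } instances[2];
             *
             * the block list has one entry per element, named "Block[0]",
             * "Block[1]", and so on, so instance arrays are matched below
             * by the "Block[" prefix instead of an exact name comparison.
             */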
            if (is_interface_array && !prog->data->spirv) {
               unsigned l = strlen(ifc_name);

               /* Even when a match is found, do not "break" here. As this
                * is an array of instances, all elements of the array need
                * to be marked as referenced.
                */
               for (unsigned i = 0; i < num_blocks; i++) {
                  if (strncmp(ifc_name, blocks[i].name.string, l) == 0 &&
                      blocks[i].name.string[l] == '[') {
                     if (buffer_block_index == -1)
                        buffer_block_index = i;

                     struct hash_entry *entry =
                        _mesa_hash_table_search(state.referenced_uniforms[shader_type],
                                                var->name);
                     if (entry) {
                        struct uniform_array_info *ainfo =
                           (struct uniform_array_info *) entry->data;
                        if (BITSET_TEST(ainfo->indices, blocks[i].linearized_array_index))
                           blocks[i].stageref |= 1U << shader_type;
                     }
                  }
               }
            } else {
               for (unsigned i = 0; i < num_blocks; i++) {
                  bool match = false;
                  if (!prog->data->spirv) {
                     match = strcmp(ifc_name, blocks[i].name.string) == 0;
                  } else {
                     match = var->data.binding == blocks[i].Binding;
                  }
                  if (match) {
                     buffer_block_index = i;

                     if (!prog->data->spirv) {
                        struct hash_entry *entry =
                           _mesa_hash_table_search(state.referenced_uniforms[shader_type],
                                                   var->name);
                        if (entry)
                           blocks[i].stageref |= 1U << shader_type;
                     }

                     break;
                  }
               }
            }
         }

         if (nir_variable_is_in_ssbo(var) &&
             !(var->data.access & ACCESS_NON_WRITEABLE)) {
            unsigned array_size = is_interface_array ?
               glsl_get_length(var->type) : 1;

            STATIC_ASSERT(MAX_SHADER_STORAGE_BUFFERS <= 32);

            /* Buffers from each stage are pointers to the ones stored in
             * the program. We need to account for this before computing
             * the mask below, otherwise the mask will be incorrect:
             *
             *    prog->data->SSBlocks        : [a][b][c][d][e][f]
             *    VS sh->Program->sh.SSBlocks : [a][b][c]
             *    FS sh->Program->sh.SSBlocks : [d][e][f]
             *
             * E.g. for FS buffer 1, buffer_block_index will be 4 but
             * sh_block_index will be 1.
             */
            int base = sh->Program->sh.ShaderStorageBlocks[0] -
               prog->data->ShaderStorageBlocks;

            assert(base >= 0);

            int sh_block_index = buffer_block_index - base;

            /* Shaders that use too many SSBOs will fail to compile, which
             * we don't care about.
             *
             * This is true for shaders that do not use too many SSBOs:
             */
            if (sh_block_index + array_size <= 32) {
               /* u_bit_consecutive(i, n) sets n consecutive bits starting
                * at bit i.
                */
               state.shader_storage_blocks_write_access |=
                  u_bit_consecutive(sh_block_index, array_size);
            }
         }

         if (blocks && !prog->data->spirv && state.var_is_in_block) {
            if (glsl_without_array(state.current_var->type) != state.current_var->interface_type) {
               /* this is nested at some offset inside the block */
               bool found = false;
               char sentinel = '\0';

               if (glsl_type_is_struct(state.current_var->type)) {
                  sentinel = '.';
               } else if (glsl_type_is_array(state.current_var->type) &&
                          (glsl_type_is_array(glsl_get_array_element(state.current_var->type)) ||
                           glsl_type_is_struct(glsl_without_array(state.current_var->type)))) {
                  sentinel = '[';
               }

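               /* With sentinel '.', a block member like "s.v" is matched
                * by comparing the variable name "s" with the prefix before
                * the '.'; with sentinel '[', the same applies to members
                * of nested arrays such as "a[0].v".
                */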
               const unsigned l = strlen(state.current_var->name);
               for (unsigned i = 0; i < num_blocks; i++) {
                  for (unsigned j = 0; j < blocks[i].NumUniforms; j++) {
                     if (sentinel) {
                        const char *begin = blocks[i].Uniforms[j].Name;
                        const char *end = strchr(begin, sentinel);

                        if (end == NULL)
                           continue;

                        if ((ptrdiff_t) l != (end - begin))
                           continue;

                        found = strncmp(state.current_var->name, begin, l) == 0;
                     } else {
                        found = strcmp(state.current_var->name, blocks[i].Uniforms[j].Name) == 0;
                     }

                     if (found) {
                        location = j;

                        struct hash_entry *entry =
                           _mesa_hash_table_search(state.referenced_uniforms[shader_type],
                                                   var->name);
                        if (entry)
                           blocks[i].stageref |= 1U << shader_type;

                        break;
                     }
                  }

                  if (found)
                     break;
               }
               assert(found);
               var->data.location = location;
            } else {
               /* this is the base block offset */
               var->data.location = buffer_block_index;
               location = 0;
            }
            assert(buffer_block_index >= 0);
            const struct gl_uniform_block *const block =
               &blocks[buffer_block_index];
            assert(location >= 0 && location < block->NumUniforms);

            const struct gl_uniform_buffer_variable *const ubo_var =
               &block->Uniforms[location];

            state.offset = ubo_var->Offset;
         }

         /* Check whether the uniform has already been processed for
          * another stage. If so, validate that the definitions are
          * compatible and update the active stage mask.
          */
         if (find_and_update_previous_uniform_storage(consts, prog, &state,
                                                      var, name, type,
                                                      shader_type)) {
            ralloc_free(name);
            free_type_tree(type_tree);
            continue;
         }

         /* From now on the variable's location will be its uniform index */
         if (!state.var_is_in_block)
            var->data.location = prog->data->NumUniformStorage;
         else
            location = -1;

         bool row_major =
            var->data.matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;
         int res = nir_link_uniform(consts, prog, sh->Program, shader_type,
                                    type, 0, location, &state,
                                    !prog->data->spirv ? &name : NULL,
                                    !prog->data->spirv ? strlen(name) : 0,
                                    row_major);

         free_type_tree(type_tree);
         ralloc_free(name);

         if (res == -1)
            return false;
      }

      if (!prog->data->spirv) {
         _mesa_hash_table_destroy(state.referenced_uniforms[shader_type],
                                  NULL);
      }

      if (state.num_shader_samplers >
          consts->Program[shader_type].MaxTextureImageUnits) {
         linker_error(prog, "Too many %s shader texture samplers\n",
                      _mesa_shader_stage_to_string(shader_type));
         continue;
      }

      if (state.num_shader_images >
          consts->Program[shader_type].MaxImageUniforms) {
         linker_error(prog, "Too many %s shader image uniforms (%u > %u)\n",
                      _mesa_shader_stage_to_string(shader_type),
                      state.num_shader_images,
                      consts->Program[shader_type].MaxImageUniforms);
         continue;
      }

      sh->Program->SamplersUsed = state.shader_samplers_used;
      sh->Program->sh.ShaderStorageBlocksWriteAccess =
         state.shader_storage_blocks_write_access;
      sh->shadow_samplers = state.shader_shadow_samplers;
      sh->Program->info.num_textures = state.num_shader_samplers;
      sh->Program->info.num_images = state.num_shader_images;
      sh->num_uniform_components = state.num_shader_uniform_components;
      sh->num_combined_uniform_components = sh->num_uniform_components;
   }
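
   /* num_values was only accumulated for uniforms that need backing data
    * slots (non-builtins that live outside any block); see the end of
    * nir_link_uniform().
    */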
   prog->data->NumHiddenUniforms = state.num_hidden_uniforms;
   prog->data->NumUniformDataSlots = state.num_values;

   assert(prog->data->spirv || prog->data->NumUniformStorage == storage_size);

   if (prog->data->spirv)
      prog->NumUniformRemapTable = state.max_uniform_location;

   nir_setup_uniform_remap_tables(consts, prog);
   gl_nir_set_uniform_initializers(consts, prog);

   _mesa_hash_table_destroy(state.uniform_hash, hash_free_uniform_name);

   return true;
}