/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * Remap load_uniform intrinsics to nir_load_ubo or nir_load_ubo_vec4 accesses
 * of UBO binding point 0. Simultaneously, remap existing UBO accesses by
 * increasing their binding point by 1.
 *
 * For PIPE_CAP_PACKED_UNIFORMS, dword_packed should be set to indicate that
 * nir_intrinsic_load_uniform addresses in increments of dwords instead of
 * vec4s.
 *
 * If load_vec4 is set, nir_intrinsic_load_ubo_vec4 will be generated instead
 * of nir_intrinsic_load_ubo, saving addressing math on hardware that needs
 * aligned vec4 loads in increments of vec4s (such as TGSI CONST file loads).
 */
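
/* A typical invocation from a driver's compile path might look like this
 * sketch (the NIR_PASS call site and the flag choices are illustrative, not
 * taken from this file):
 *
 *    NIR_PASS(progress, nir, nir_lower_uniforms_to_ubo,
 *             true,    // dword_packed: PIPE_CAP_PACKED_UNIFORMS is set
 *             false);  // load_vec4: generate plain nir_load_ubo loads
 */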

#include "nir.h"
#include "nir_builder.h"

struct nir_lower_uniforms_to_ubo_state {
   bool dword_packed;
   bool load_vec4;
};

static bool
nir_lower_uniforms_to_ubo_instr(nir_builder *b, nir_instr *instr, void *data)
{
   struct nir_lower_uniforms_to_ubo_state *state = data;

   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   b->cursor = nir_before_instr(&intr->instr);

   /* Increase all UBO binding points by 1. */
   if (intr->intrinsic == nir_intrinsic_load_ubo &&
       !b->shader->info.first_ubo_is_default_ubo) {
      nir_def *old_idx = intr->src[0].ssa;
      nir_def *new_idx = nir_iadd_imm(b, old_idx, 1);
      nir_src_rewrite(&intr->src[0], new_idx);
      return true;
   }
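
   /* With the rewrite above, a shader whose app-visible UBOs were bound at
    * points 0..N now reads them from 1..N+1, leaving binding point 0 free
    * for the default uniform buffer created at the end of this pass.
    */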

   if (intr->intrinsic == nir_intrinsic_load_uniform) {
      nir_def *ubo_idx = nir_imm_int(b, 0);
      nir_def *uniform_offset = intr->src[0].ssa;

      assert(intr->def.bit_size >= 8);
      nir_def *load_result;
      if (state->load_vec4) {
         /* Don't ask us to generate load_vec4 when you've packed your
          * uniforms as dwords instead of vec4s.
          */
         assert(!state->dword_packed);
         load_result = nir_load_ubo_vec4(b, intr->num_components, intr->def.bit_size,
                                         ubo_idx, uniform_offset,
                                         .base = nir_intrinsic_base(intr));
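         /* Note: nir_load_ubo_vec4 addresses in vec4 units, the same units
          * a non-dword-packed load_uniform uses, which is why base and
          * offset pass through unscaled above.
          */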
      } else {
         /* For PIPE_CAP_PACKED_UNIFORMS, the uniforms are packed with the
          * base/offset in dword units instead of vec4 units.
          */
         int multiplier = state->dword_packed ? 4 : 16;
         load_result = nir_load_ubo(b, intr->num_components, intr->def.bit_size,
                                    ubo_idx,
                                    nir_iadd_imm(b, nir_imul_imm(b, uniform_offset, multiplier),
                                                 nir_intrinsic_base(intr) * multiplier));
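         /* E.g. a non-packed (multiplier = 16) load_uniform with base 2 and
          * a constant offset of 1 becomes a load_ubo of buffer 0 at byte
          * offset 1 * 16 + 2 * 16 = 48.
          */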
         nir_intrinsic_instr *load = nir_instr_as_intrinsic(load_result->parent_instr);

         /* If it's const, set the alignment to our known constant offset. If
          * not, set it to a pessimistic value based on the multiplier (or the
          * scalar size, for qword loads).
          *
          * We could potentially set up stricter alignments for indirects by
          * knowing what features are enabled in the APIs (see the comment in
          * nir_lower_ubo_vec4.c).
          */
         if (nir_src_is_const(intr->src[0])) {
            nir_intrinsic_set_align(load, NIR_ALIGN_MUL_MAX,
                                    (nir_src_as_uint(intr->src[0]) * multiplier +
                                     nir_intrinsic_base(intr) * multiplier) %
                                       NIR_ALIGN_MUL_MAX);
         } else {
            nir_intrinsic_set_align(load, MAX2(multiplier, intr->def.bit_size / 8), 0);
         }
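
         /* For the indirect case above, e.g. a dword-packed (multiplier = 4)
          * 64-bit load gets align_mul = MAX2(4, 64 / 8) = 8 and
          * align_offset = 0.
          */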

         nir_intrinsic_set_range_base(load, nir_intrinsic_base(intr) * multiplier);
         nir_intrinsic_set_range(load, nir_intrinsic_range(intr) * multiplier);
      }
      nir_def_replace(&intr->def, load_result);
      return true;
   }

   return false;
}

bool
nir_lower_uniforms_to_ubo(nir_shader *shader, bool dword_packed, bool load_vec4)
{
   bool progress = false;

   struct nir_lower_uniforms_to_ubo_state state = {
      .dword_packed = dword_packed,
      .load_vec4 = load_vec4,
   };

   progress = nir_shader_instructions_pass(shader,
                                           nir_lower_uniforms_to_ubo_instr,
                                           nir_metadata_control_flow,
                                           &state);
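
   /* nir_metadata_control_flow is safe to preserve here: the pass only
    * rewrites intrinsics in place and never changes the CFG, so block
    * indices and dominance information remain valid.
    */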

   if (progress) {
      if (!shader->info.first_ubo_is_default_ubo) {
         nir_foreach_variable_with_modes(var, shader, nir_var_mem_ubo) {
            var->data.binding++;
            if (var->data.driver_location != -1)
               var->data.driver_location++;
            /* Only increment the location for UBO arrays. */
            if (glsl_without_array(var->type) == var->interface_type &&
                glsl_type_is_array(var->type))
               var->data.location++;
         }
      }
      shader->info.num_ubos++;

      if (shader->num_uniforms > 0) {
         const struct glsl_type *type = glsl_array_type(glsl_vec4_type(),
                                                        shader->num_uniforms, 16);
         nir_variable *ubo = nir_variable_create(shader, nir_var_mem_ubo, type,
                                                 "uniform_0");
         ubo->data.binding = 0;
         ubo->data.explicit_binding = 1;

         struct glsl_struct_field field = {
            .type = type,
            .name = "data",
            .location = -1,
         };
         ubo->interface_type =
            glsl_interface_type(&field, 1, GLSL_INTERFACE_PACKING_STD430,
                                false, "__ubo0_interface");
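
         /* In GLSL terms, the variable built above corresponds roughly to
          * (illustrative only; the lowered UBO has no real GLSL source):
          *
          *    layout(binding = 0) uniform __ubo0_interface {
          *       vec4 data[num_uniforms];
          *    };
          */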
      }
   }

   shader->info.first_ubo_is_default_ubo = true;
   return progress;
}