/*
 * Copyright (c) 2019 Zodiac Inflight Innovations
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jonathan Marek <[email protected]>
 */

#include "etnaviv_compiler_nir.h"
#include "util/register_allocate.h"

/* Use "r63.z" for the depth reg; reg_get_base() wraps it around to r0.z
 * (fs registers are offset by 1 to avoid reserving r0).
 */
#define REG_FRAG_DEPTH ((ETNA_MAX_TEMPS - 1) * NUM_REG_TYPES + REG_TYPE_VIRT_SCALAR_Z)
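
/* Virtual register numbering: every base temp expands into NUM_REG_TYPES
 * virtual registers, one per component layout, so a virtual register is
 * base * NUM_REG_TYPES + REG_TYPE_*.  For example, the .x scalar of base
 * temp 5 is 5 * NUM_REG_TYPES + REG_TYPE_VIRT_SCALAR_X, and the full vec4
 * is 5 * NUM_REG_TYPES + REG_TYPE_VEC4.
 */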

/* precomputed by register_allocate */
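/* q_values[b][c] is register_allocate's "q" metric: roughly, the maximum
 * number of class-b registers that a single class-c register can conflict
 * with.  Passing a precomputed table to ra_set_finalize() below avoids
 * recomputing it each time the register set is built.
 */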
static unsigned int *q_values[] = {
   (unsigned int[]) {1, 2, 3, 4, 2, 2, 3, },
   (unsigned int[]) {3, 5, 6, 6, 5, 5, 6, },
   (unsigned int[]) {3, 4, 4, 4, 4, 4, 4, },
   (unsigned int[]) {1, 1, 1, 1, 1, 1, 1, },
   (unsigned int[]) {1, 2, 2, 2, 1, 2, 2, },
   (unsigned int[]) {2, 3, 3, 3, 2, 3, 3, },
   (unsigned int[]) {2, 2, 2, 2, 2, 2, 2, },
};

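/* Map a virtual register to its register_allocate class.  Classes group the
 * virtual register types by component count, plus the special T classes used
 * for the transcendental ops and the C classes for contiguous components.
 */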
static inline int
reg_get_class(int virt_reg)
{
   switch (reg_get_type(virt_reg)) {
   case REG_TYPE_VEC4:
      return REG_CLASS_VEC4;
   case REG_TYPE_VIRT_VEC3_XYZ:
   case REG_TYPE_VIRT_VEC3_XYW:
   case REG_TYPE_VIRT_VEC3_XZW:
   case REG_TYPE_VIRT_VEC3_YZW:
      return REG_CLASS_VIRT_VEC3;
   case REG_TYPE_VIRT_VEC2_XY:
   case REG_TYPE_VIRT_VEC2_XZ:
   case REG_TYPE_VIRT_VEC2_XW:
   case REG_TYPE_VIRT_VEC2_YZ:
   case REG_TYPE_VIRT_VEC2_YW:
   case REG_TYPE_VIRT_VEC2_ZW:
      return REG_CLASS_VIRT_VEC2;
   case REG_TYPE_VIRT_SCALAR_X:
   case REG_TYPE_VIRT_SCALAR_Y:
   case REG_TYPE_VIRT_SCALAR_Z:
   case REG_TYPE_VIRT_SCALAR_W:
      return REG_CLASS_VIRT_SCALAR;
   case REG_TYPE_VIRT_VEC2T_XY:
   case REG_TYPE_VIRT_VEC2T_ZW:
      return REG_CLASS_VIRT_VEC2T;
   case REG_TYPE_VIRT_VEC2C_XY:
   case REG_TYPE_VIRT_VEC2C_YZ:
   case REG_TYPE_VIRT_VEC2C_ZW:
      return REG_CLASS_VIRT_VEC2C;
   case REG_TYPE_VIRT_VEC3C_XYZ:
   case REG_TYPE_VIRT_VEC3C_YZW:
      return REG_CLASS_VIRT_VEC3C;
   }

   assert(false);
   return 0;
}

struct ra_regs *
etna_ra_setup(void *mem_ctx)
{
   struct ra_regs *regs = ra_alloc_reg_set(mem_ctx, ETNA_MAX_TEMPS *
                                           NUM_REG_TYPES, false);

   /* classes are always created from index 0, so the index is equal to the
    * class enum, which represents a register with (c+1) components
    */
   struct ra_class *classes[NUM_REG_CLASSES];
   for (int c = 0; c < NUM_REG_CLASSES; c++)
      classes[c] = ra_alloc_reg_class(regs);
   /* add each register of each class */
   for (int r = 0; r < NUM_REG_TYPES * ETNA_MAX_TEMPS; r++)
      ra_class_add_reg(classes[reg_get_class(r)], r);
   /* set conflicts */
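   /* virtual registers that share a base temp conflict whenever their
    * writemasks overlap (e.g. the .xy vec2 and the .x scalar of the same
    * temp can't be assigned to different values at the same time)
    */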
   for (int r = 0; r < ETNA_MAX_TEMPS; r++) {
      for (int i = 0; i < NUM_REG_TYPES; i++) {
         for (int j = 0; j < i; j++) {
            if (reg_writemask[i] & reg_writemask[j]) {
               ra_add_reg_conflict(regs, NUM_REG_TYPES * r + i,
                                   NUM_REG_TYPES * r + j);
            }
         }
      }
   }
   ra_set_finalize(regs, q_values);

   return regs;
}

void
etna_ra_assign(struct etna_compile *c, nir_shader *shader)
{
   struct etna_compiler *compiler = c->variant->shader->compiler;
   struct ra_regs *regs = compiler->regs;

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   /* liveness and interference */

   nir_index_blocks(impl);
   nir_index_ssa_defs(impl);

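   /* clear any stale dead-instruction flags left behind by earlier passes so
    * they don't confuse the liveness computation below
    */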
   nir_foreach_function_impl(impl, shader) {
      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            instr->pass_flags &= ~PASS_FLAGS_IS_DEAD_MASK;
         }
      }
   }

   /* this gives an approximation/upper limit on how many nodes are needed
    * (some ssa values do not represent an allocated register)
    */
   unsigned max_nodes = impl->ssa_alloc;
   unsigned *live_map = ralloc_array(NULL, unsigned, max_nodes);
   memset(live_map, 0xff, sizeof(unsigned) * max_nodes);
   struct live_def *defs = rzalloc_array(NULL, struct live_def, max_nodes);

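   /* etna_live_defs() fills defs[] with one live_def per value that actually
    * gets a register node and live_map[] with the SSA-index to node-index
    * mapping (entries start out as ~0 from the memset above)
    */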
   unsigned num_nodes = etna_live_defs(impl, defs, live_map);
   struct ra_graph *g = ra_alloc_interference_graph(regs, num_nodes);

   /* set classes from num_components */
   for (unsigned i = 0; i < num_nodes; i++) {
      nir_instr *instr = defs[i].instr;
      nir_def *def = defs[i].def;
      unsigned comp = def->num_components - 1;

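      /* on cores with the new transcendental unit, these ALU ops need the
       * aligned two-component VEC2T class for their destination
       */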
      if (instr->type == nir_instr_type_alu &&
          c->specs->has_new_transcendentals) {
         switch (nir_instr_as_alu(instr)->op) {
         case nir_op_fdiv:
         case nir_op_flog2:
         case nir_op_fsin:
         case nir_op_fcos:
            comp = REG_CLASS_VIRT_VEC2T;
            break;
         default:
            break;
         }
      }

      if (instr->type == nir_instr_type_intrinsic) {
         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         /* can't have dst swizzle or sparse writemask on UBO loads */
         if (intr->intrinsic == nir_intrinsic_load_ubo) {
            assert(def == &intr->def);
            if (def->num_components == 2)
               comp = REG_CLASS_VIRT_VEC2C;
            if (def->num_components == 3)
               comp = REG_CLASS_VIRT_VEC3C;
         }
      }

      ra_set_node_class(g, i, ra_get_class_from_index(regs, comp));
   }

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_def *def = def_for_instr(instr);
         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         unsigned reg;

         switch (intr->intrinsic) {
         case nir_intrinsic_store_deref: {
            /* don't want outputs to be swizzled
             * TODO: better would be to set the type to X/XY/XYZ/XYZW
             * TODO: what if fragcoord.z is read after writing fragdepth?
             */
            nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
            unsigned index = live_map[src_index(impl, &intr->src[1])];

            if (shader->info.stage == MESA_SHADER_FRAGMENT &&
                deref->var->data.location == FRAG_RESULT_DEPTH) {
               ra_set_node_reg(g, index, REG_FRAG_DEPTH);
            } else {
               ra_set_node_class(g, index, ra_get_class_from_index(regs, REG_CLASS_VEC4));
            }
         } continue;
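         /* pin shader inputs to their fixed input register: the base comes
          * from nir_intrinsic_base(), the register type from num_components
          */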
         case nir_intrinsic_load_input:
            reg = nir_intrinsic_base(intr) * NUM_REG_TYPES + (unsigned[]) {
               REG_TYPE_VIRT_SCALAR_X,
               REG_TYPE_VIRT_VEC2_XY,
               REG_TYPE_VIRT_VEC3_XYZ,
               REG_TYPE_VEC4,
            }[def->num_components - 1];
            break;
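         /* instance and vertex id are pinned to the .y and .x scalars of the
          * register just past the last input register (infile.num_reg)
          */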
         case nir_intrinsic_load_instance_id:
            reg = c->variant->infile.num_reg * NUM_REG_TYPES + REG_TYPE_VIRT_SCALAR_Y;
            break;
         case nir_intrinsic_load_vertex_id:
            reg = c->variant->infile.num_reg * NUM_REG_TYPES + REG_TYPE_VIRT_SCALAR_X;
            break;
         default:
            continue;
         }

         ra_set_node_reg(g, live_map[def_index(impl, def)], reg);
      }
   }

   /* add interference for intersecting live ranges */
   for (unsigned i = 0; i < num_nodes; i++) {
      assert(defs[i].live_start < defs[i].live_end);
      for (unsigned j = 0; j < i; j++) {
         if (defs[i].live_start >= defs[j].live_end || defs[j].live_start >= defs[i].live_end)
            continue;
         ra_add_node_interference(g, i, j);
      }
   }

   ralloc_free(defs);

   /* Allocate registers */
   ASSERTED bool ok = ra_allocate(g);
   assert(ok);

   c->g = g;
   c->live_map = live_map;
   c->num_nodes = num_nodes;
}

unsigned
etna_ra_finish(struct etna_compile *c)
{
   /* TODO: better way to get number of registers used? */
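   /* reg_get_base() recovers the base temp index from each allocated virtual
    * register, so the highest base seen plus one is the number of temps used
    */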
   unsigned j = 0;
   for (unsigned i = 0; i < c->num_nodes; i++) {
      j = MAX2(j, reg_get_base(c, ra_get_node_reg(c->g, i)) + 1);
   }

   ralloc_free(c->g);
   ralloc_free(c->live_map);

   return j;
}