/*
 * Copyright (c) 2012-2019 Etnaviv Project
 * Copyright (c) 2019 Zodiac Inflight Innovations
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jonathan Marek <[email protected]>
 *    Wladimir J. van der Laan <[email protected]>
 */

#include "etna_core_info.h"
#include "etnaviv_compiler.h"
#include "etnaviv_compiler_nir.h"
#include "etnaviv_asm.h"
#include "etnaviv_context.h"
#include "etnaviv_debug.h"
#include "etnaviv_nir.h"
#include "etnaviv_uniforms.h"
#include "etnaviv_util.h"
#include "nir.h"

#include <math.h>
#include "isa/enums.h"
#include "util/u_memory.h"
#include "util/register_allocate.h"
#include "compiler/nir/nir_builder.h"

#include "util/compiler.h"
#include "util/half_float.h"

static bool
etna_alu_to_scalar_filter_cb(const nir_instr *instr, const void *data)
{
   const struct etna_core_info *info = data;

   if (instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *alu = nir_instr_as_alu(instr);
   switch (alu->op) {
   case nir_op_frsq:
   case nir_op_frcp:
   case nir_op_flog2:
   case nir_op_fexp2:
   case nir_op_fsqrt:
   case nir_op_fcos:
   case nir_op_fsin:
   case nir_op_fdiv:
   case nir_op_imul:
      return true;
   /* TODO: can do better than alu_to_scalar for vector compares */
   case nir_op_b32all_fequal2:
   case nir_op_b32all_fequal3:
   case nir_op_b32all_fequal4:
   case nir_op_b32any_fnequal2:
   case nir_op_b32any_fnequal3:
   case nir_op_b32any_fnequal4:
   case nir_op_b32all_iequal2:
   case nir_op_b32all_iequal3:
   case nir_op_b32all_iequal4:
   case nir_op_b32any_inequal2:
   case nir_op_b32any_inequal3:
   case nir_op_b32any_inequal4:
      return true;
   case nir_op_fdot2:
      if (!etna_core_has_feature(info, ETNA_FEATURE_HALTI2))
         return true;
      break;
   default:
      break;
   }

   return false;
}

static void
etna_emit_block_start(struct etna_compile *c, unsigned block)
{
   c->block_ptr[block] = c->inst_ptr;
}

static void
etna_emit_output(struct etna_compile *c, nir_variable *var, struct etna_inst_src src)
{
   struct etna_shader_io_file *sf = &c->variant->outfile;

   if (is_fs(c)) {
      switch (var->data.location) {
      case FRAG_RESULT_COLOR:
      case FRAG_RESULT_DATA0: /* DATA0 is used by gallium shaders for color */
         c->variant->ps_color_out_reg = src.reg;
         break;
      case FRAG_RESULT_DEPTH:
         c->variant->ps_depth_out_reg = src.reg;
         break;
      default:
         unreachable("Unsupported fs output");
      }
      return;
   }

   switch (var->data.location) {
   case VARYING_SLOT_POS:
      c->variant->vs_pos_out_reg = src.reg;
      break;
   case VARYING_SLOT_PSIZ:
      c->variant->vs_pointsize_out_reg = src.reg;
      break;
   default:
      assert(sf->num_reg < ETNA_NUM_INPUTS);
      sf->reg[sf->num_reg].reg = src.reg;
      sf->reg[sf->num_reg].slot = var->data.location;
      sf->reg[sf->num_reg].num_components = glsl_get_components(var->type);
      sf->num_reg++;
      break;
   }
}

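/* Run a NIR pass via NIR_PASS() and report whether it made progress. */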
#define OPT(nir, pass, ...) ({                             \
   bool this_progress = false;                             \
   NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
   this_progress;                                          \
})

static void
etna_optimize_loop(nir_shader *s)
{
   bool progress;
   do {
      progress = false;

      NIR_PASS_V(s, nir_lower_vars_to_ssa);
      progress |= OPT(s, nir_opt_copy_prop_vars);
      progress |= OPT(s, nir_opt_shrink_stores, true);
      progress |= OPT(s, nir_opt_shrink_vectors, false);
      progress |= OPT(s, nir_copy_prop);
      progress |= OPT(s, nir_opt_dce);
      progress |= OPT(s, nir_opt_cse);
      progress |= OPT(s, nir_opt_peephole_select, 16, true, true);
      progress |= OPT(s, nir_opt_intrinsics);
      progress |= OPT(s, nir_opt_algebraic);
      progress |= OPT(s, nir_opt_constant_folding);
      progress |= OPT(s, nir_opt_dead_cf);
      if (OPT(s, nir_opt_loop)) {
         progress = true;
         /* If nir_opt_loop makes progress, then we need to clean
          * things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
          * to make progress.
          */
         OPT(s, nir_copy_prop);
         OPT(s, nir_opt_dce);
      }
      progress |= OPT(s, nir_opt_loop_unroll);
      progress |= OPT(s, nir_opt_if, nir_opt_if_optimize_phi_true_false);
      progress |= OPT(s, nir_opt_remove_phis);
      progress |= OPT(s, nir_opt_undef);
   }
   while (progress);
}

static int
etna_glsl_type_size(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}

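/* Copy the constants gathered during compilation into the variant's uniform
 * info; each 64-bit entry is split into a 32-bit value and a 32-bit content
 * tag (ETNA_UNIFORM_*).
 */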
static void
copy_uniform_state_to_shader(struct etna_shader_variant *sobj, uint64_t *consts, unsigned count)
{
   struct etna_shader_uniform_info *uinfo = &sobj->uniforms;

   uinfo->count = count * 4;
   uinfo->data = MALLOC(uinfo->count * sizeof(*uinfo->data));
   uinfo->contents = MALLOC(uinfo->count * sizeof(*uinfo->contents));

   for (unsigned i = 0; i < uinfo->count; i++) {
      uinfo->data[i] = consts[i];
      uinfo->contents[i] = consts[i] >> 32;
   }

   etna_set_shader_uniforms_dirty_flags(sobj);
}

#define ALU_SWIZ(s) INST_SWIZ((s)->swizzle[0], (s)->swizzle[1], (s)->swizzle[2], (s)->swizzle[3])
#define SRC_DISABLE ((hw_src){})
#define SRC_CONST(idx, s) ((hw_src){.use=1, .rgroup = ISA_REG_GROUP_UNIFORM_0, .reg=idx, .swiz=s})
#define SRC_REG(idx, s) ((hw_src){.use=1, .rgroup = ISA_REG_GROUP_TEMP, .reg=idx, .swiz=s})

typedef struct etna_inst_dst hw_dst;
typedef struct etna_inst_src hw_src;

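/* Compose an extra swizzle on top of a source's existing swizzle.
 * Immediate sources are returned unchanged.
 */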
static inline hw_src
src_swizzle(hw_src src, unsigned swizzle)
{
   if (src.rgroup != ISA_REG_GROUP_IMMED)
      src.swiz = inst_swiz_compose(src.swiz, swizzle);

   return src;
}

/* constants are represented as 64-bit ints
 * 32-bit for the value and 32-bit for the type (imm, uniform, etc)
 */

#define CONST_VAL(a, b) (nir_const_value) {.u64 = (uint64_t)(a) << 32 | (uint64_t)(b)}
#define CONST(x) CONST_VAL(ETNA_UNIFORM_CONSTANT, x)
#define UNIFORM(x) CONST_VAL(ETNA_UNIFORM_UNIFORM, x)
#define TEXSCALE(x, i) CONST_VAL(ETNA_UNIFORM_TEXRECT_SCALE_X + (i), x)
#define TEXSIZE(x, i) CONST_VAL(ETNA_UNIFORM_TEXTURE_WIDTH + (i), x)

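/* Add a value to a constant vec4, reusing a component that already holds the
 * same value or is still empty. Returns the component index, or -1 if all
 * four components are taken.
 */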
static int
const_add(uint64_t *c, uint64_t value)
{
   for (unsigned i = 0; i < 4; i++) {
      if (c[i] == value || !c[i]) {
         c[i] = value;
         return i;
      }
   }
   return -1;
}

static hw_src
const_src(struct etna_compile *c, nir_const_value *value, unsigned num_components)
{
   /* use inline immediates if possible */
   if (c->info->halti >= 2 && num_components == 1 &&
       value[0].u64 >> 32 == ETNA_UNIFORM_CONSTANT) {
      uint32_t bits = value[0].u32;

      /* "float" - shifted by 12 */
      if ((bits & 0xfff) == 0)
         return etna_immediate_src(0, bits >> 12);

      /* "unsigned" - raw 20 bit value */
      if (bits < (1 << 20))
         return etna_immediate_src(2, bits);

      /* "signed" - sign extended 20-bit (sign included) value */
      if (bits >= 0xfff80000)
         return etna_immediate_src(1, bits);
   }

   unsigned i;
   int swiz = -1;
   for (i = 0; swiz < 0; i++) {
      uint64_t *a = &c->consts[i*4];
      uint64_t save[4];
      memcpy(save, a, sizeof(save));
      swiz = 0;
      for (unsigned j = 0; j < num_components; j++) {
         int c = const_add(a, value[j].u64);
         if (c < 0) {
            memcpy(a, save, sizeof(save));
            swiz = -1;
            break;
         }
         swiz |= c << j * 2;
      }
   }

   assert(i <= ETNA_MAX_IMM / 4);
   c->const_count = MAX2(c->const_count, i);

   return SRC_CONST(i - 1, swiz);
}

/* how to swizzle when used as a src */
static const uint8_t
reg_swiz[NUM_REG_TYPES] = {
   [REG_TYPE_VEC4] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_X] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_Y] = SWIZZLE(Y, Y, Y, Y),
   [REG_TYPE_VIRT_VEC2_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC2T_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC2C_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_Z] = SWIZZLE(Z, Z, Z, Z),
   [REG_TYPE_VIRT_VEC2_XZ] = SWIZZLE(X, Z, X, Z),
   [REG_TYPE_VIRT_VEC2_YZ] = SWIZZLE(Y, Z, Y, Z),
   [REG_TYPE_VIRT_VEC2C_YZ] = SWIZZLE(Y, Z, Y, Z),
   [REG_TYPE_VIRT_VEC3_XYZ] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC3C_XYZ] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_W] = SWIZZLE(W, W, W, W),
   [REG_TYPE_VIRT_VEC2_XW] = SWIZZLE(X, W, X, W),
   [REG_TYPE_VIRT_VEC2_YW] = SWIZZLE(Y, W, Y, W),
   [REG_TYPE_VIRT_VEC3_XYW] = SWIZZLE(X, Y, W, X),
   [REG_TYPE_VIRT_VEC2_ZW] = SWIZZLE(Z, W, Z, W),
   [REG_TYPE_VIRT_VEC2T_ZW] = SWIZZLE(Z, W, Z, W),
   [REG_TYPE_VIRT_VEC2C_ZW] = SWIZZLE(Z, W, Z, W),
   [REG_TYPE_VIRT_VEC3_XZW] = SWIZZLE(X, Z, W, X),
   [REG_TYPE_VIRT_VEC3_YZW] = SWIZZLE(Y, Z, W, X),
   [REG_TYPE_VIRT_VEC3C_YZW] = SWIZZLE(Y, Z, W, X),
};

/* how to swizzle when used as a dest */
static const uint8_t
reg_dst_swiz[NUM_REG_TYPES] = {
   [REG_TYPE_VEC4] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_X] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_Y] = SWIZZLE(X, X, X, X),
   [REG_TYPE_VIRT_VEC2_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC2T_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC2C_XY] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_Z] = SWIZZLE(X, X, X, X),
   [REG_TYPE_VIRT_VEC2_XZ] = SWIZZLE(X, X, Y, Y),
   [REG_TYPE_VIRT_VEC2_YZ] = SWIZZLE(X, X, Y, Y),
   [REG_TYPE_VIRT_VEC2C_YZ] = SWIZZLE(X, X, Y, Y),
   [REG_TYPE_VIRT_VEC3_XYZ] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_VEC3C_XYZ] = INST_SWIZ_IDENTITY,
   [REG_TYPE_VIRT_SCALAR_W] = SWIZZLE(X, X, X, X),
   [REG_TYPE_VIRT_VEC2_XW] = SWIZZLE(X, X, Y, Y),
   [REG_TYPE_VIRT_VEC2_YW] = SWIZZLE(X, X, Y, Y),
   [REG_TYPE_VIRT_VEC3_XYW] = SWIZZLE(X, Y, Z, Z),
   [REG_TYPE_VIRT_VEC2_ZW] = SWIZZLE(X, X, X, Y),
   [REG_TYPE_VIRT_VEC2T_ZW] = SWIZZLE(X, X, X, Y),
   [REG_TYPE_VIRT_VEC2C_ZW] = SWIZZLE(X, X, X, Y),
   [REG_TYPE_VIRT_VEC3_XZW] = SWIZZLE(X, Y, Y, Z),
   [REG_TYPE_VIRT_VEC3_YZW] = SWIZZLE(X, X, Y, Z),
   [REG_TYPE_VIRT_VEC3C_YZW] = SWIZZLE(X, X, Y, Z),
};

/* nir_src to allocated register */
static hw_src
ra_src(struct etna_compile *c, nir_src *src)
{
   unsigned reg = ra_get_node_reg(c->g, c->live_map[src_index(c->impl, src)]);
   return SRC_REG(reg_get_base(c, reg), reg_swiz[reg_get_type(reg)]);
}

static hw_src
get_src(struct etna_compile *c, nir_src *src)
{
   nir_instr *instr = src->ssa->parent_instr;

   if (instr->pass_flags & BYPASS_SRC) {
      assert(instr->type == nir_instr_type_alu);
      nir_alu_instr *alu = nir_instr_as_alu(instr);
      assert(alu->op == nir_op_mov);
      return src_swizzle(get_src(c, &alu->src[0].src), ALU_SWIZ(&alu->src[0]));
   }

   switch (instr->type) {
   case nir_instr_type_load_const:
      return const_src(c, nir_instr_as_load_const(instr)->value, src->ssa->num_components);
   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
      switch (intr->intrinsic) {
      case nir_intrinsic_load_input:
      case nir_intrinsic_load_instance_id:
      case nir_intrinsic_load_vertex_id:
      case nir_intrinsic_load_uniform:
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_reg:
         return ra_src(c, src);
      case nir_intrinsic_load_front_face:
         return (hw_src) { .use = 1, .rgroup = ISA_REG_GROUP_INTERNAL };
      case nir_intrinsic_load_frag_coord:
         return SRC_REG(0, INST_SWIZ_IDENTITY);
      case nir_intrinsic_load_texture_scale: {
         int sampler = nir_src_as_int(intr->src[0]);
         nir_const_value values[] = {
            TEXSCALE(sampler, 0),
            TEXSCALE(sampler, 1),
         };

         return src_swizzle(const_src(c, values, 2), SWIZZLE(X,Y,X,X));
      }
      case nir_intrinsic_load_texture_size_etna: {
         int sampler = nir_src_as_int(intr->src[0]);
         nir_const_value values[] = {
            TEXSIZE(sampler, 0),
            TEXSIZE(sampler, 1),
            TEXSIZE(sampler, 2),
         };

         return src_swizzle(const_src(c, values, 3), SWIZZLE(X,Y,Z,X));
      }
      default:
         compile_error(c, "Unhandled NIR intrinsic type: %s\n",
                       nir_intrinsic_infos[intr->intrinsic].name);
         break;
      }
   } break;
   case nir_instr_type_alu:
   case nir_instr_type_tex:
      return ra_src(c, src);
   case nir_instr_type_undef: {
      /* return zero to deal with broken Blur demo */
      nir_const_value value = CONST(0);
      return src_swizzle(const_src(c, &value, 1), SWIZZLE(X,X,X,X));
   }
   default:
      compile_error(c, "Unhandled NIR instruction type: %d\n", instr->type);
      break;
   }

   return SRC_DISABLE;
}

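/* Helper for lower_alu(): returns true if the def's components do not map 1:1
 * onto the vecN destination, or if it also feeds a mov/vecN chain, in which
 * case an extra mov is needed.
 */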
static bool
vec_dest_has_swizzle(nir_alu_instr *vec, nir_def *ssa)
{
   for (unsigned i = 0; i < vec->def.num_components; i++) {
      if (vec->src[i].src.ssa != ssa)
         continue;

      if (vec->src[i].swizzle[0] != i)
         return true;
   }

   /* don't deal with possible bypassed vec/mov chain */
   nir_foreach_use(use_src, ssa) {
      nir_instr *instr = nir_src_parent_instr(use_src);
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *alu = nir_instr_as_alu(instr);

      switch (alu->op) {
      case nir_op_mov:
      case nir_op_vec2:
      case nir_op_vec3:
      case nir_op_vec4:
         return true;
      default:
         break;
      }
   }
   return false;
}

/* get allocated dest register for nir_def
 * *p_swiz tells how the components need to be placed into register
 */
static hw_dst
ra_def(struct etna_compile *c, nir_def *def, unsigned *p_swiz)
{
   unsigned swiz = INST_SWIZ_IDENTITY, mask = 0xf;
   def = real_def(def, &swiz, &mask);

   unsigned r = ra_get_node_reg(c->g, c->live_map[def_index(c->impl, def)]);
   unsigned t = reg_get_type(r);

   *p_swiz = inst_swiz_compose(swiz, reg_dst_swiz[t]);

   return (hw_dst) {
      .use = 1,
      .reg = reg_get_base(c, r),
      .write_mask = inst_write_mask_compose(mask, reg_writemask[t]),
   };
}

static void
emit_alu(struct etna_compile *c, nir_alu_instr *alu)
{
   const nir_op_info *info = &nir_op_infos[alu->op];

   /* marked as dead instruction (vecN and other bypassed instr) */
   if (is_dead_instruction(&alu->instr))
      return;

   assert(!(alu->op >= nir_op_vec2 && alu->op <= nir_op_vec4));

   unsigned dst_swiz;
   hw_dst dst = ra_def(c, &alu->def, &dst_swiz);

   switch (alu->op) {
   case nir_op_fdot2:
   case nir_op_fdot3:
   case nir_op_fdot4:
      /* not per-component - don't compose dst_swiz */
      dst_swiz = INST_SWIZ_IDENTITY;
      break;
   default:
      break;
   }

   hw_src srcs[3] = {0};

   for (int i = 0; i < info->num_inputs; i++) {
      nir_alu_src *asrc = &alu->src[i];
      hw_src src;

      src = src_swizzle(get_src(c, &asrc->src), ALU_SWIZ(asrc));
      src = src_swizzle(src, dst_swiz);

      if (src.rgroup != ISA_REG_GROUP_IMMED) {
         src.neg = is_src_mod_neg(&alu->instr, i) || (alu->op == nir_op_fneg);
         src.abs = is_src_mod_abs(&alu->instr, i) || (alu->op == nir_op_fabs);
      } else {
         assert(alu->op != nir_op_fabs);
         assert(!is_src_mod_abs(&alu->instr, i) && alu->op != nir_op_fabs);

         if (src.imm_type > 0)
            assert(!is_src_mod_neg(&alu->instr, i));

         if (is_src_mod_neg(&alu->instr, i) && src.imm_type == 0)
            src.imm_val ^= 0x80000;
      }

      srcs[i] = src;
   }

   etna_emit_alu(c, alu->op, dst, srcs, alu->op == nir_op_fsat);
}

static void
emit_tex(struct etna_compile *c, nir_tex_instr *tex)
{
   unsigned dst_swiz;
   hw_dst dst = ra_def(c, &tex->def, &dst_swiz);
   nir_src *coord = NULL, *src1 = NULL, *src2 = NULL;

   for (unsigned i = 0; i < tex->num_srcs; i++) {
      switch (tex->src[i].src_type) {
      case nir_tex_src_coord:
         coord = &tex->src[i].src;
         break;
      case nir_tex_src_bias:
      case nir_tex_src_lod:
      case nir_tex_src_ddx:
         assert(!src1);
         src1 = &tex->src[i].src;
         break;
      case nir_tex_src_comparator:
      case nir_tex_src_ddy:
         src2 = &tex->src[i].src;
         break;
      default:
         compile_error(c, "Unhandled NIR tex src type: %d\n",
                       tex->src[i].src_type);
         break;
      }
   }

   etna_emit_tex(c, tex->op, tex->sampler_index, dst_swiz, dst, get_src(c, coord),
                 src1 ? get_src(c, src1) : SRC_DISABLE,
                 src2 ? get_src(c, src2) : SRC_DISABLE);
}

static void
emit_intrinsic(struct etna_compile *c, nir_intrinsic_instr *intr)
{
   switch (intr->intrinsic) {
   case nir_intrinsic_store_deref:
      etna_emit_output(c, nir_src_as_deref(intr->src[0])->var, get_src(c, &intr->src[1]));
      break;
   case nir_intrinsic_terminate_if:
      etna_emit_discard(c, get_src(c, &intr->src[0]));
      break;
   case nir_intrinsic_terminate:
      etna_emit_discard(c, SRC_DISABLE);
      break;
   case nir_intrinsic_load_uniform: {
      unsigned dst_swiz;
      struct etna_inst_dst dst = ra_def(c, &intr->def, &dst_swiz);

      /* TODO: rework so extra MOV isn't required, load up to 4 addresses at once */
      emit_inst(c, &(struct etna_inst) {
         .opcode = ISA_OPC_MOVAR,
         .dst.use = 1,
         .dst.write_mask = ISA_WRMASK_X___,
         .src[0] = get_src(c, &intr->src[0]),
      });
      emit_inst(c, &(struct etna_inst) {
         .opcode = ISA_OPC_MOV,
         .dst = dst,
         .src[0] = {
            .use = 1,
            .rgroup = ISA_REG_GROUP_UNIFORM_0,
            .reg = nir_intrinsic_base(intr),
            .swiz = dst_swiz,
            .amode = ISA_REG_ADDRESSING_MODE_AX,
         },
      });
   } break;
   case nir_intrinsic_load_ubo: {
      /* TODO: if offset is of the form (x + C) then add C to the base instead */
      unsigned idx = nir_src_as_const_value(intr->src[0])[0].u32;
      unsigned dst_swiz;
      emit_inst(c, &(struct etna_inst) {
         .opcode = ISA_OPC_LOAD,
         .type = ISA_TYPE_U32,
         .dst = ra_def(c, &intr->def, &dst_swiz),
         .src[0] = get_src(c, &intr->src[1]),
         .src[1] = const_src(c, &CONST_VAL(ETNA_UNIFORM_UBO_ADDR, idx), 1),
      });
   } break;
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_frag_coord:
      break;
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_texture_scale:
   case nir_intrinsic_load_texture_size_etna:
   case nir_intrinsic_decl_reg:
   case nir_intrinsic_load_reg:
   case nir_intrinsic_store_reg:
      break;
   default:
      compile_error(c, "Unhandled NIR intrinsic type: %s\n",
                    nir_intrinsic_infos[intr->intrinsic].name);
   }
}

static void
emit_instr(struct etna_compile *c, nir_instr *instr)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      emit_alu(c, nir_instr_as_alu(instr));
      break;
   case nir_instr_type_tex:
      emit_tex(c, nir_instr_as_tex(instr));
      break;
   case nir_instr_type_intrinsic:
      emit_intrinsic(c, nir_instr_as_intrinsic(instr));
      break;
   case nir_instr_type_jump:
      assert(nir_instr_is_last(instr));
      break;
   case nir_instr_type_load_const:
   case nir_instr_type_undef:
   case nir_instr_type_deref:
      break;
   default:
      compile_error(c, "Unhandled NIR instruction type: %d\n", instr->type);
      break;
   }
}

static void
emit_block(struct etna_compile *c, nir_block *block)
{
   etna_emit_block_start(c, block->index);

   nir_foreach_instr(instr, block)
      emit_instr(c, instr);

   /* succs->index < block->index is for the loop case */
   nir_block *succs = block->successors[0];
   if (nir_block_ends_in_jump(block) || succs->index < block->index)
      etna_emit_jump(c, succs->index, SRC_DISABLE);
}

static void
emit_cf_list(struct etna_compile *c, struct exec_list *list);

static void
emit_if(struct etna_compile *c, nir_if *nif)
{
   etna_emit_jump(c, nir_if_first_else_block(nif)->index, get_src(c, &nif->condition));
   emit_cf_list(c, &nif->then_list);

   /* jump at end of then_list to skip else_list
    * not needed if then_list already ends with a jump or else_list is empty
    */
   if (!nir_block_ends_in_jump(nir_if_last_then_block(nif)) &&
       !nir_cf_list_is_empty_block(&nif->else_list))
      etna_emit_jump(c, nir_if_last_then_block(nif)->successors[0]->index, SRC_DISABLE);

   emit_cf_list(c, &nif->else_list);
}

static void
emit_cf_list(struct etna_compile *c, struct exec_list *list)
{
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_block:
         emit_block(c, nir_cf_node_as_block(node));
         break;
      case nir_cf_node_if:
         emit_if(c, nir_cf_node_as_if(node));
         break;
      case nir_cf_node_loop:
         assert(!nir_loop_has_continue_construct(nir_cf_node_as_loop(node)));
         emit_cf_list(c, &nir_cf_node_as_loop(node)->body);
         break;
      default:
         compile_error(c, "Unknown NIR node type\n");
         break;
      }
   }
}

/* based on nir_lower_vec_to_movs */
static unsigned
insert_vec_mov(nir_alu_instr *vec, unsigned start_idx, nir_shader *shader)
{
   assert(start_idx < nir_op_infos[vec->op].num_inputs);
   unsigned write_mask = (1u << start_idx);

   nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_mov);
   nir_alu_src_copy(&mov->src[0], &vec->src[start_idx]);

   mov->src[0].swizzle[0] = vec->src[start_idx].swizzle[0];

   if (is_src_mod_neg(&vec->instr, start_idx))
      set_src_mod_neg(&mov->instr, 0);

   if (is_src_mod_abs(&vec->instr, start_idx))
      set_src_mod_abs(&mov->instr, 0);

   unsigned num_components = 1;

   for (unsigned i = start_idx + 1; i < vec->def.num_components; i++) {
      if (nir_srcs_equal(vec->src[i].src, vec->src[start_idx].src) &&
         is_src_mod_neg(&vec->instr, i) == is_src_mod_neg(&vec->instr, start_idx) &&
         is_src_mod_abs(&vec->instr, i) == is_src_mod_abs(&vec->instr, start_idx)) {
         write_mask |= (1 << i);
         mov->src[0].swizzle[num_components] = vec->src[i].swizzle[0];
         num_components++;
      }
   }

   nir_def_init(&mov->instr, &mov->def, num_components, 32);

   /* replace vec srcs with inserted mov */
   for (unsigned i = 0, j = 0; i < 4; i++) {
      if (!(write_mask & (1 << i)))
         continue;

      nir_src_rewrite(&vec->src[i].src, &mov->def);
      vec->src[i].swizzle[0] = j++;
   }

   nir_instr_insert_before(&vec->instr, &mov->instr);

   return write_mask;
}

/*
 * Get the nir_const_value from an alu src.  Also look at
 * the parent instruction as it could be a fabs/fneg.
 */
static nir_const_value *get_alu_cv(nir_alu_src *src)
{
   nir_const_value *cv = nir_src_as_const_value(src->src);

   if (!cv &&
       (src->src.ssa->parent_instr->type == nir_instr_type_alu)) {
      nir_alu_instr *parent = nir_instr_as_alu(src->src.ssa->parent_instr);

      if ((parent->op == nir_op_fabs) ||
          (parent->op == nir_op_fneg)) {
         cv = nir_src_as_const_value(parent->src[0].src);

         if (cv) {
            /* Validate that we are only using ETNA_UNIFORM_CONSTANT const_values. */
            for (unsigned i = 0; i < parent->def.num_components; i++) {
               if (cv[i].u64 >> 32 != ETNA_UNIFORM_CONSTANT) {
                  cv = NULL;
                  break;
               }
            }
         }
      }
   }

   return cv;
}

/*
 * for vecN instructions:
 * -merge constant sources into a single src
 * -insert movs (nir_lower_vec_to_movs equivalent)
 * for non-vecN instructions:
 * -try to merge constants as single constant
 * -insert movs for multiple constants if required
 */
static void
lower_alu(struct etna_compile *c, nir_alu_instr *alu)
{
   const nir_op_info *info = &nir_op_infos[alu->op];

   nir_builder b = nir_builder_at(nir_before_instr(&alu->instr));

   switch (alu->op) {
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      break;
   default:
      if (c->specs->has_no_oneconst_limit)
         return;

      nir_const_value value[4] = {};
      uint8_t swizzle[4][4] = {};
      unsigned swiz_max = 0, num_different_const_srcs = 0;
      int first_const = -1;

      for (unsigned i = 0; i < info->num_inputs; i++) {
         nir_const_value *cv = get_alu_cv(&alu->src[i]);
         if (!cv)
            continue;

         unsigned num_components = info->input_sizes[i] ?: alu->def.num_components;
         for (unsigned j = 0; j < num_components; j++) {
            int idx = const_add(&value[0].u64, cv[alu->src[i].swizzle[j]].u64);
            swizzle[i][j] = idx;
            swiz_max = MAX2(swiz_max, (unsigned) idx);
         }

         if (first_const == -1)
            first_const = i;

         if (!nir_srcs_equal(alu->src[first_const].src, alu->src[i].src))
            num_different_const_srcs++;
      }

      /* nothing to do */
      if (num_different_const_srcs == 0)
         return;

      /* resolve with single combined const src */
      if (swiz_max < 4) {
         nir_def *def = nir_build_imm(&b, swiz_max + 1, 32, value);

         for (unsigned i = 0; i < info->num_inputs; i++) {
            nir_const_value *cv = get_alu_cv(&alu->src[i]);
            if (!cv)
               continue;

            nir_src_rewrite(&alu->src[i].src, def);

            for (unsigned j = 0; j < 4; j++)
               alu->src[i].swizzle[j] = swizzle[i][j];
         }
         return;
      }

      /* resolve with movs */
      unsigned num_const = 0;
      for (unsigned i = 0; i < info->num_inputs; i++) {
         nir_const_value *cv = get_alu_cv(&alu->src[i]);
         if (!cv)
            continue;

         num_const++;
         if (num_const == 1)
            continue;

         nir_def *mov = nir_mov(&b, alu->src[i].src.ssa);
         nir_src_rewrite(&alu->src[i].src, mov);
      }
      return;
   }

   nir_const_value value[4];
   unsigned num_components = 0;

   for (unsigned i = 0; i < info->num_inputs; i++) {
      nir_const_value *cv = get_alu_cv(&alu->src[i]);
      if (cv)
         value[num_components++] = cv[alu->src[i].swizzle[0]];
   }

   /* if there is more than one constant source to the vecN, combine them
    * into a single load_const (removing the vecN completely if all components
    * are constant)
    */
   if (num_components > 1) {
      nir_def *def = nir_build_imm(&b, num_components, 32, value);

      if (num_components == info->num_inputs) {
         nir_def_replace(&alu->def, def);
         return;
      }

      for (unsigned i = 0, j = 0; i < info->num_inputs; i++) {
         nir_const_value *cv = get_alu_cv(&alu->src[i]);
         if (!cv)
            continue;

         nir_src_rewrite(&alu->src[i].src, def);
         alu->src[i].swizzle[0] = j++;
      }
   }

   unsigned finished_write_mask = 0;
   for (unsigned i = 0; i < alu->def.num_components; i++) {
      nir_def *ssa = alu->src[i].src.ssa;

      /* check that vecN instruction is only user of this */
      bool need_mov = false;
      nir_foreach_use_including_if(use_src, ssa) {
         if (nir_src_is_if(use_src) || nir_src_parent_instr(use_src) != &alu->instr)
            need_mov = true;
      }

      nir_instr *instr = ssa->parent_instr;
      switch (instr->type) {
      case nir_instr_type_alu:
      case nir_instr_type_tex:
         break;
      case nir_instr_type_intrinsic:
         if (nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_input) {
            need_mov = vec_dest_has_swizzle(alu, &nir_instr_as_intrinsic(instr)->def);
            break;
         }
         FALLTHROUGH;
      default:
         need_mov = true;
      }

      if (need_mov && !(finished_write_mask & (1 << i)))
         finished_write_mask |= insert_vec_mov(alu, i, c->nir);
   }
}

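/* Lower the remaining NIR (vecN/constant handling, uniform loads), convert
 * out of SSA, run register allocation and emit the shader body. The number
 * of temporaries and constants used is returned via the out parameters.
 */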
static bool
emit_shader(struct etna_compile *c, unsigned *num_temps, unsigned *num_consts)
{
   nir_shader *shader = c->nir;
   c->impl = nir_shader_get_entrypoint(shader);

   bool have_indirect_uniform = false;
   unsigned indirect_max = 0;

   nir_builder b = nir_builder_create(c->impl);

   /* convert non-dynamic uniform loads to constants, etc */
   nir_foreach_block(block, c->impl) {
      nir_foreach_instr_safe(instr, block) {
         switch (instr->type) {
         case nir_instr_type_alu:
            /* deals with vecN and const srcs */
            lower_alu(c, nir_instr_as_alu(instr));
            break;
         case nir_instr_type_load_const: {
            nir_load_const_instr *load_const = nir_instr_as_load_const(instr);
            for (unsigned i = 0; i < load_const->def.num_components; i++)
               load_const->value[i] = CONST(load_const->value[i].u32);
         } break;
         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            /* TODO: load_ubo can also become a constant in some cases
             * (at the moment it can end up emitting a LOAD with two
             *  uniform sources, which could be a problem on HALTI2)
             */
            if (intr->intrinsic != nir_intrinsic_load_uniform)
               break;
            nir_const_value *off = nir_src_as_const_value(intr->src[0]);
            if (!off || off[0].u64 >> 32 != ETNA_UNIFORM_CONSTANT) {
               have_indirect_uniform = true;
               indirect_max = nir_intrinsic_base(intr) + nir_intrinsic_range(intr);
               break;
            }

            unsigned base = nir_intrinsic_base(intr);
            /* pre halti2 uniform offset will be float */
            if (c->info->halti < 2)
               base += (unsigned) off[0].f32;
            else
               base += off[0].u32;
            nir_const_value value[4];

            for (unsigned i = 0; i < intr->def.num_components; i++)
               value[i] = UNIFORM(base * 4 + i);

            b.cursor = nir_after_instr(instr);
            nir_def *def = nir_build_imm(&b, intr->def.num_components, 32, value);

            nir_def_rewrite_uses(&intr->def, def);
            nir_instr_remove(instr);
         } break;
         default:
            break;
         }
      }
   }

   /* TODO: only emit required indirect uniform ranges */
   if (have_indirect_uniform) {
      for (unsigned i = 0; i < indirect_max * 4; i++)
         c->consts[i] = UNIFORM(i).u64;
      c->const_count = indirect_max;
   }

   /* add mov for any store output using sysval/const and for depth stores from intrinsics */
   nir_foreach_block(block, c->impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

         switch (intr->intrinsic) {
         case nir_intrinsic_store_deref: {
            nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
            nir_src *src = &intr->src[1];
            if (nir_src_is_const(*src) || is_sysval(src->ssa->parent_instr) ||
                (shader->info.stage == MESA_SHADER_FRAGMENT &&
                 deref->var->data.location == FRAG_RESULT_DEPTH &&
                 src->ssa->parent_instr->type != nir_instr_type_alu)) {
               b.cursor = nir_before_instr(instr);
               nir_src_rewrite(src, nir_mov(&b, src->ssa));
            }
         } break;
         default:
            break;
         }
      }
   }

   /* call directly to avoid validation (load_const don't pass validation at this point) */
   nir_convert_from_ssa(shader, true);
   nir_trivialize_registers(shader);

   etna_ra_assign(c, shader);

   emit_cf_list(c, &nir_shader_get_entrypoint(shader)->body);

   *num_temps = etna_ra_finish(c);
   *num_consts = c->const_count;
   return true;
}

static bool
etna_compile_check_limits(struct etna_shader_variant *v)
{
   const struct etna_core_info *info = v->shader->info;
   const struct etna_specs *specs = v->shader->specs;
   int max_uniforms = (v->stage == MESA_SHADER_VERTEX)
                         ? specs->max_vs_uniforms
                         : specs->max_ps_uniforms;

   if (!specs->has_icache && v->needs_icache) {
      DBG("Number of instructions (%d) exceeds maximum %d", v->code_size / 4,
          specs->max_instructions);
      return false;
   }

   if (v->num_temps > info->gpu.max_registers) {
      DBG("Number of registers (%d) exceeds maximum %d", v->num_temps,
          info->gpu.max_registers);
      return false;
   }

   if (v->uniforms.count / 4 > max_uniforms) {
      DBG("Number of uniforms (%d) exceeds maximum %d",
          v->uniforms.count / 4, max_uniforms);
      return false;
   }

   if (v->stage == MESA_SHADER_VERTEX) {
      int num_outputs = v->vs_pointsize_out_reg >= 0 ? 2 : 1;

      num_outputs += v->outfile.num_reg;

      if (num_outputs > specs->max_vs_outputs) {
         DBG("Number of VS outputs (%zu) exceeds maximum %d",
             v->outfile.num_reg, specs->max_vs_outputs);
         return false;
      }
   }

   return true;
}

static void
fill_vs_mystery(struct etna_shader_variant *v)
{
   const struct etna_core_info *info = v->shader->info;

   v->input_count_unk8 = DIV_ROUND_UP(v->infile.num_reg + 4, 16); /* XXX what is this */

   /* fill in "mystery meat" load balancing value. This value determines how
    * work is scheduled between VS and PS in the unified shader architecture.
    * More precisely, it is determined from the number of VS outputs, as well
    * as chip-specific vertex output buffer size, vertex cache size, and the
    * number of shader cores.
    *
    * XXX this is a conservative estimate, the "optimal" value is only known
    * for sure at link time because some outputs may be unused and thus
    * unmapped. Then again, in the general use case with GLSL the vertex and
    * fragment shaders are linked already before submitting to Gallium, thus
    * all outputs are used.
    *
    * note: the TGSI compiler counts all outputs (including position and
    * pointsize), here v->outfile.num_reg only counts varyings, +1 to
    * compensate for the position output
    * TODO: might have a problem that we don't count pointsize when it is used
    */

   int half_out = v->outfile.num_reg / 2 + 1;
   assert(half_out);

   uint32_t b = ((20480 / (info->gpu.vertex_output_buffer_size -
                           2 * half_out * info->gpu.vertex_cache_size)) +
                 9) /
                10;
   uint32_t a = (b + 256 / (info->gpu.shader_core_count * half_out)) / 2;
   v->vs_load_balancing = VIVS_VS_LOAD_BALANCING_A(MIN2(a, 255)) |
                          VIVS_VS_LOAD_BALANCING_B(MIN2(b, 255)) |
                          VIVS_VS_LOAD_BALANCING_C(0x3f) |
                          VIVS_VS_LOAD_BALANCING_D(0x0f);
}

bool
etna_compile_shader(struct etna_shader_variant *v)
{
   if (unlikely(!v))
      return false;

   struct etna_compile *c = CALLOC_STRUCT(etna_compile);
   if (!c)
      return false;

   c->variant = v;
   c->info = v->shader->info;
   c->specs = v->shader->specs;
   c->nir = nir_shader_clone(NULL, v->shader->nir);

   nir_shader *s = c->nir;
   const struct etna_specs *specs = c->specs;

   v->stage = s->info.stage;
   v->uses_discard = s->info.fs.uses_discard;
   v->num_loops = 0; /* TODO */
   v->vs_id_in_reg = -1;
   v->vs_pos_out_reg = -1;
   v->vs_pointsize_out_reg = -1;
   v->ps_color_out_reg = 0; /* 0 for shader that doesn't write fragcolor.. */
   v->ps_depth_out_reg = -1;

   /*
    * Lower glTexCoord, fixes e.g. neverball point sprite (exit cylinder stars)
    * and gl4es pointsprite.trace apitrace
    */
   if (s->info.stage == MESA_SHADER_FRAGMENT && v->key.sprite_coord_enable) {
      NIR_PASS_V(s, nir_lower_texcoord_replace, v->key.sprite_coord_enable,
                 false, v->key.sprite_coord_yinvert);
   }

   /*
    * Remove any dead in variables before we iterate over them
    */
   NIR_PASS_V(s, nir_remove_dead_variables, nir_var_shader_in, NULL);

   /* setup input linking */
   struct etna_shader_io_file *sf = &v->infile;
   if (s->info.stage == MESA_SHADER_VERTEX) {
      nir_foreach_shader_in_variable(var, s) {
         unsigned idx = var->data.driver_location;
         sf->reg[idx].reg = idx;
         sf->reg[idx].slot = var->data.location;
         sf->reg[idx].num_components = glsl_get_components(var->type);
         sf->num_reg = MAX2(sf->num_reg, idx+1);
      }
   } else {
      unsigned count = 0;
      nir_foreach_shader_in_variable(var, s) {
         unsigned idx = var->data.driver_location;
         sf->reg[idx].reg = idx + 1;
         sf->reg[idx].slot = var->data.location;
         sf->reg[idx].num_components = glsl_get_components(var->type);
         sf->num_reg = MAX2(sf->num_reg, idx+1);
         count++;
      }
      assert(sf->num_reg == count);
   }

   NIR_PASS_V(s, nir_lower_io, nir_var_shader_in | nir_var_uniform, etna_glsl_type_size,
            (nir_lower_io_options)0);

   NIR_PASS_V(s, nir_lower_vars_to_ssa);
   NIR_PASS_V(s, nir_lower_indirect_derefs, nir_var_all, UINT32_MAX);
   NIR_PASS_V(s, etna_nir_lower_texture, &v->key);

   NIR_PASS_V(s, nir_lower_alu_to_scalar, etna_alu_to_scalar_filter_cb, c->info);
   if (c->info->halti >= 2) {
      nir_lower_idiv_options idiv_options = {
         .allow_fp16 = true,
      };
      NIR_PASS_V(s, nir_lower_idiv, &idiv_options);
   }
   NIR_PASS_V(s, nir_lower_alu);

   etna_optimize_loop(s);

   /* TODO: remove this extra run if nir_opt_peephole_select is able to handle ubo's. */
   if (OPT(s, etna_nir_lower_ubo_to_uniform))
      etna_optimize_loop(s);

   NIR_PASS_V(s, etna_lower_io, v);
   NIR_PASS_V(s, nir_lower_pack);
   etna_optimize_loop(s);

   if (v->shader->specs->vs_need_z_div)
      NIR_PASS_V(s, nir_lower_clip_halfz);

   /* lower pre-halti2 to float (halti0 has integers, but only scalar..) */
   if (c->info->halti < 2) {
      /* use opt_algebraic between int_to_float and bool_to_float because
       * int_to_float emits ftrunc, and ftrunc lowering generates bool ops
       */
      NIR_PASS_V(s, nir_lower_int_to_float);
      NIR_PASS_V(s, nir_opt_algebraic);
      NIR_PASS_V(s, nir_lower_bool_to_float, true);
   } else {
      NIR_PASS_V(s, nir_lower_bool_to_int32);
   }

   while (OPT(s, nir_opt_vectorize, NULL, NULL));
   NIR_PASS_V(s, nir_lower_alu_to_scalar, etna_alu_to_scalar_filter_cb, c->info);

   NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp, NULL);
   NIR_PASS_V(s, nir_opt_algebraic_late);

   NIR_PASS_V(s, nir_move_vec_src_uses_to_dest, false);
   NIR_PASS_V(s, nir_copy_prop);
   /* need copy prop after uses_to_dest, and before src mods: see
    * dEQP-GLES2.functional.shaders.random.all_features.fragment.95
    */

   NIR_PASS_V(s, nir_opt_dce);
   NIR_PASS_V(s, nir_opt_cse);

   NIR_PASS_V(s, nir_lower_bool_to_bitsize);
   NIR_PASS_V(s, etna_lower_alu, c->specs->has_new_transcendentals);

   /* needs to be the last pass that touches pass_flags! */
   NIR_PASS_V(s, etna_nir_lower_to_source_mods);

   if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS))
      nir_print_shader(s, stdout);

   unsigned block_ptr[nir_shader_get_entrypoint(s)->num_blocks];
   c->block_ptr = block_ptr;

   unsigned num_consts;
   ASSERTED bool ok = emit_shader(c, &v->num_temps, &num_consts);
   assert(ok);

   /* empty shader, emit NOP */
   if (!c->inst_ptr)
      emit_inst(c, &(struct etna_inst) { .opcode = ISA_OPC_NOP });

   /* assemble instructions, fixing up labels */
   uint32_t *code = MALLOC(c->inst_ptr * 16);
   for (unsigned i = 0; i < c->inst_ptr; i++) {
      struct etna_inst *inst = &c->code[i];
      if (inst->opcode == ISA_OPC_BRANCH || inst->opcode == ISA_OPC_BRANCH_UNARY || inst->opcode == ISA_OPC_BRANCH_BINARY)
         inst->imm = block_ptr[inst->imm];

      etna_assemble(&code[i * 4], inst, specs->has_no_oneconst_limit);
   }

   v->code_size = c->inst_ptr * 4;
   v->code = code;
   v->needs_icache = c->inst_ptr > specs->max_instructions;

   copy_uniform_state_to_shader(v, c->consts, num_consts);

   if (s->info.stage == MESA_SHADER_FRAGMENT) {
      v->input_count_unk8 = 31; /* XXX what is this */
      assert(v->ps_depth_out_reg <= 0);
   } else {
      fill_vs_mystery(v);
   }

   bool result = etna_compile_check_limits(v);
   ralloc_free(c->nir);
   FREE(c);
   return result;
}

static const struct etna_shader_inout *
etna_shader_vs_lookup(const struct etna_shader_variant *sobj,
                      const struct etna_shader_inout *in)
{
   for (int i = 0; i < sobj->outfile.num_reg; i++)
      if (sobj->outfile.reg[i].slot == in->slot)
         return &sobj->outfile.reg[i];

   /*
    * There are valid NIR shader pairs where the vertex shader has
    * a VARYING_SLOT_BFC0 shader_out and the corresponding fragment
    * shader has a VARYING_SLOT_COL0 shader_in.
    * So at link time, if there is no matching VARYING_SLOT_COL[n] output,
    * we must fall back to looking up VARYING_SLOT_BFC[n] instead.
    */
   gl_varying_slot slot;

   if (in->slot == VARYING_SLOT_COL0)
      slot = VARYING_SLOT_BFC0;
   else if (in->slot == VARYING_SLOT_COL1)
      slot = VARYING_SLOT_BFC1;
   else
      return NULL;

   for (int i = 0; i < sobj->outfile.num_reg; i++)
      if (sobj->outfile.reg[i].slot == slot)
         return &sobj->outfile.reg[i];

   return NULL;
}

void
etna_link_shader(struct etna_shader_link_info *info,
                 const struct etna_shader_variant *vs,
                 const struct etna_shader_variant *fs)
{
   int comp_ofs = 0;
   /* For each fragment input we need to find the associated vertex shader
    * output, which can be found by matching on semantic name and index. A
    * binary search could be used because the vs outputs are sorted by their
    * semantic index and grouped by semantic type by fill_in_vs_outputs.
    */
   assert(fs->infile.num_reg <= ETNA_NUM_INPUTS);
   info->pcoord_varying_comp_ofs = -1;

   for (int idx = 0; idx < fs->infile.num_reg; ++idx) {
      const struct etna_shader_inout *fsio = &fs->infile.reg[idx];
      const struct etna_shader_inout *vsio = etna_shader_vs_lookup(vs, fsio);
      struct etna_varying *varying;
      bool interpolate_always = true;

      assert(fsio->reg > 0 && fsio->reg <= ARRAY_SIZE(info->varyings));

      if (fsio->reg > info->num_varyings)
         info->num_varyings = fsio->reg;

      varying = &info->varyings[fsio->reg - 1];
      varying->num_components = fsio->num_components;

      if (!interpolate_always) /* colors affected by flat shading */
         varying->pa_attributes = 0x200;
      else /* texture coord or other bypasses flat shading */
         varying->pa_attributes = 0x2f1;

      varying->use[0] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[1] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[2] = VARYING_COMPONENT_USE_UNUSED;
      varying->use[3] = VARYING_COMPONENT_USE_UNUSED;

      /* point/tex coord is an input to the PS without matching VS output,
       * so it gets a varying slot without being assigned a VS register.
       */
      if (fsio->slot == VARYING_SLOT_PNTC) {
         varying->use[0] = VARYING_COMPONENT_USE_POINTCOORD_X;
         varying->use[1] = VARYING_COMPONENT_USE_POINTCOORD_Y;

         info->pcoord_varying_comp_ofs = comp_ofs;
      } else if (util_varying_is_point_coord(fsio->slot, fs->key.sprite_coord_enable)) {
         /*
          * Do nothing, TexCoord is lowered to PointCoord above
          * and the TexCoord here is just a remnant. This needs
          * to be removed with some nir_remove_dead_variables(),
          * but that one removes all FS inputs ... why?
          */
      } else {
         /* pick a random register to use if there is no VS output */
         if (vsio == NULL)
            varying->reg = 0;
         else
            varying->reg = vsio->reg;
      }

      comp_ofs += varying->num_components;
   }

   assert(info->num_varyings == fs->infile.num_reg);
}
1387