/*
 * Copyright © 2019 Google, Inc.
 * SPDX-License-Identifier: MIT
 */

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_math.h"
#include "ir3_compiler.h"
#include "ir3_nir.h"

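/*
 * Computes the byte range of the UBO (or global buffer) that this load may
 * touch, rounded out to the const upload granularity ("alignment" is in units
 * of vec4s, i.e. 16 bytes).  Returns false if the accessed range is unknown.
 */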
static inline bool
get_ubo_load_range(nir_shader *nir, nir_intrinsic_instr *instr,
                   uint32_t alignment, struct ir3_ubo_range *r)
{
   uint32_t offset = nir_intrinsic_range_base(instr);
   uint32_t size = nir_intrinsic_range(instr);

   if (instr->intrinsic == nir_intrinsic_load_global_ir3) {
      offset *= 4;
      size *= 4;
   }

   /* If the offset is constant, the range is trivial (and NIR may not have
    * figured it out).
    */
   if (nir_src_is_const(instr->src[1])) {
      offset = nir_src_as_uint(instr->src[1]);
      if (instr->intrinsic == nir_intrinsic_load_global_ir3)
         offset *= 4;
      size = nir_intrinsic_dest_components(instr) * 4;
   }

   /* If we haven't figured out the range accessed in the UBO, bail. */
   if (size == ~0)
      return false;

   r->start = ROUND_DOWN_TO(offset, alignment * 16);
   r->end = ALIGN(offset + size, alignment * 16);

   return true;
}

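/*
 * Fills out the UBO identity (global base pointer, bindless descriptor set,
 * or bound block index) used to match this load against planned upload
 * ranges.  Returns false if the block index is dynamic and we can't treat the
 * load as hitting a single UBO.
 */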
static bool
get_ubo_info(nir_intrinsic_instr *instr, struct ir3_ubo_info *ubo)
{
   if (instr->intrinsic == nir_intrinsic_load_global_ir3) {
      ubo->global_base = instr->src[0].ssa;
      ubo->block = 0;
      ubo->bindless_base = 0;
      ubo->bindless = false;
      ubo->global = true;
      return true;
   } else if (nir_src_is_const(instr->src[0])) {
      ubo->global_base = NULL;
      ubo->block = nir_src_as_uint(instr->src[0]);
      ubo->bindless_base = 0;
      ubo->bindless = false;
      ubo->global = false;
      return true;
   } else {
      nir_intrinsic_instr *rsrc = ir3_bindless_resource(instr->src[0]);
      if (rsrc && nir_src_is_const(rsrc->src[0])) {
         ubo->global_base = NULL;
         ubo->block = nir_src_as_uint(rsrc->src[0]);
         ubo->bindless_base = nir_intrinsic_desc_set(rsrc);
         ubo->bindless = true;
         ubo->global = false;
         return true;
      }
   }
   return false;
}

/**
 * Finds the given instruction's UBO load in the UBO upload plan, if any.
 */
static const struct ir3_ubo_range *
get_existing_range(nir_intrinsic_instr *instr,
                   const struct ir3_ubo_analysis_state *state,
                   struct ir3_ubo_range *r)
{
   struct ir3_ubo_info ubo = {};

   if (!get_ubo_info(instr, &ubo))
      return NULL;

   for (int i = 0; i < state->num_enabled; i++) {
      const struct ir3_ubo_range *range = &state->range[i];
      if (!memcmp(&range->ubo, &ubo, sizeof(ubo)) && r->start >= range->start &&
          r->end <= range->end) {
         return range;
      }
   }

   return NULL;
}

/**
 * Merges together neighboring/overlapping ranges in the range plan with a
 * newly updated range.
 */
static void
merge_neighbors(struct ir3_ubo_analysis_state *state, int index)
{
   struct ir3_ubo_range *a = &state->range[index];

   /* index is always the first slot that would have neighbored/overlapped with
    * the new range.
    */
   for (int i = index + 1; i < state->num_enabled; i++) {
      struct ir3_ubo_range *b = &state->range[i];
      if (memcmp(&a->ubo, &b->ubo, sizeof(a->ubo)))
         continue;

      if (a->start > b->end || a->end < b->start)
         continue;

      /* Merge B into A. */
      a->start = MIN2(a->start, b->start);
      a->end = MAX2(a->end, b->end);

      /* Swap the last enabled range into B's now unused slot */
      *b = state->range[--state->num_enabled];
   }
}

/**
 * During the first pass over the shader, makes the plan of which UBO upload
 * should include the range covering this UBO load.
 *
 * We are passed in an upload_remaining of how much space is left for us in
 * the const file, and we make sure our plan doesn't exceed that.
 */
static void
gather_ubo_ranges(nir_shader *nir, nir_intrinsic_instr *instr,
                  struct ir3_ubo_analysis_state *state, uint32_t alignment,
                  uint32_t *upload_remaining)
{
   struct ir3_ubo_info ubo = {};
   if (!get_ubo_info(instr, &ubo))
      return;

   struct ir3_ubo_range r;
   if (!get_ubo_load_range(nir, instr, alignment, &r))
      return;

   /* See if there's an existing range for this UBO we want to merge into. */
   for (int i = 0; i < state->num_enabled; i++) {
      struct ir3_ubo_range *plan_r = &state->range[i];
      if (memcmp(&plan_r->ubo, &ubo, sizeof(ubo)))
         continue;

      /* Don't extend existing uploads unless they're
       * neighboring/overlapping.
       */
      if (r.start > plan_r->end || r.end < plan_r->start)
         continue;

      r.start = MIN2(r.start, plan_r->start);
      r.end = MAX2(r.end, plan_r->end);

      uint32_t added = (plan_r->start - r.start) + (r.end - plan_r->end);
      if (added >= *upload_remaining)
         return;

      plan_r->start = r.start;
      plan_r->end = r.end;
      *upload_remaining -= added;

      merge_neighbors(state, i);
      return;
   }

   if (state->num_enabled == ARRAY_SIZE(state->range))
      return;

   uint32_t added = r.end - r.start;
   if (added >= *upload_remaining)
      return;

   struct ir3_ubo_range *plan_r = &state->range[state->num_enabled++];
   plan_r->ubo = ubo;
   plan_r->start = r.start;
   plan_r->end = r.end;
   *upload_remaining -= added;
}

/* For indirect offset, it is common to see a pattern of multiple
 * loads with the same base, but different constant offset, ie:
 *
 *    vec1 32 ssa_33 = iadd ssa_base, const_offset
 *    vec4 32 ssa_34 = intrinsic load_const_ir3 (ssa_33) (base=N, 0, 0)
 *
 * Detect this, and peel out the const_offset part, to end up with:
 *
 *    vec4 32 ssa_34 = intrinsic load_const_ir3 (ssa_base) (base=N+const_offset,
 *                                                          0, 0)
 *
 * Or similarly:
 *
 *    vec1 32 ssa_33 = imad24_ir3 a, b, const_offset
 *    vec4 32 ssa_34 = intrinsic load_const_ir3 (ssa_33) (base=N, 0, 0)
 *
 * Can be converted to:
 *
 *    vec1 32 ssa_base = imul24 a, b
 *    vec4 32 ssa_34 = intrinsic load_const_ir3 (ssa_base) (base=N+const_offset,
 *                                                          0, 0)
 *
 * This gives the other opt passes something much easier to work
 * with (ie. not requiring value range tracking)
 */
static void
handle_partial_const(nir_builder *b, nir_def **srcp, int *offp)
{
   if ((*srcp)->parent_instr->type != nir_instr_type_alu)
      return;

   nir_alu_instr *alu = nir_instr_as_alu((*srcp)->parent_instr);

   if (alu->op == nir_op_imad24_ir3) {
      /* This case is slightly more complicated as we need to
       * replace the imad24_ir3 with an imul24:
       */
      if (!nir_src_is_const(alu->src[2].src))
         return;

      *offp += nir_src_as_uint(alu->src[2].src);
      *srcp = nir_imul24(b, nir_ssa_for_alu_src(b, alu, 0),
                         nir_ssa_for_alu_src(b, alu, 1));

      return;
   }

   if (alu->op != nir_op_iadd)
      return;

   if (nir_src_is_const(alu->src[0].src)) {
      *offp += nir_src_as_uint(alu->src[0].src);
      *srcp = alu->src[1].src.ssa;
   } else if (nir_src_is_const(alu->src[1].src)) {
      *srcp = alu->src[0].src.ssa;
      *offp += nir_src_as_uint(alu->src[1].src);
   }
}

/* Tracks the maximum bindful UBO accessed so that we reduce the UBO
 * descriptors emitted in the fast path for GL.
 */
static void
track_ubo_use(nir_intrinsic_instr *instr, nir_builder *b, int *num_ubos)
{
   if (ir3_bindless_resource(instr->src[0])) {
      assert(!b->shader->info.first_ubo_is_default_ubo); /* only set for GL */
      return;
   }

   if (nir_src_is_const(instr->src[0])) {
      int block = nir_src_as_uint(instr->src[0]);
      *num_ubos = MAX2(*num_ubos, block + 1);
   } else {
      *num_ubos = b->shader->info.num_ubos;
   }
}

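/*
 * Rewrites a load covered by the upload plan into a load_const_ir3 from the
 * const file.  Loads that aren't covered are left alone, and for load_ubo we
 * still record the UBO use so the GL fast path binds enough descriptors.
 */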
static bool
lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
                          const struct ir3_ubo_analysis_state *state,
                          int *num_ubos, uint32_t alignment)
{
   b->cursor = nir_before_instr(&instr->instr);

   struct ir3_ubo_range r;
   if (!get_ubo_load_range(b->shader, instr, alignment, &r)) {
      if (instr->intrinsic == nir_intrinsic_load_ubo)
         track_ubo_use(instr, b, num_ubos);
      return false;
   }

   /* We don't lower dynamic block index UBO loads to load_const_ir3, but we
    * could probably with some effort determine a block stride in number of
    * registers.
    */
   const struct ir3_ubo_range *range = get_existing_range(instr, state, &r);
   if (!range) {
      if (instr->intrinsic == nir_intrinsic_load_ubo)
         track_ubo_use(instr, b, num_ubos);
      return false;
   }

   nir_def *ubo_offset = instr->src[1].ssa;
   int const_offset = 0;

   handle_partial_const(b, &ubo_offset, &const_offset);

   nir_def *uniform_offset = ubo_offset;

   if (instr->intrinsic == nir_intrinsic_load_ubo) {
      /* UBO offset is in bytes, but uniform offset is in units of
       * dwords, so we need to divide by 4 (right-shift by 2). For ldc the
       * offset is in units of 16 bytes, so we need to multiply by 4. And
       * also the same for the constant part of the offset:
       */
      const int shift = -2;
      nir_def *new_offset = ir3_nir_try_propagate_bit_shift(b, ubo_offset, -2);
      if (new_offset) {
         uniform_offset = new_offset;
      } else {
         uniform_offset = shift > 0
                             ? nir_ishl_imm(b, ubo_offset, shift)
                             : nir_ushr_imm(b, ubo_offset, -shift);
      }
   }

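   /* For load_ubo the constant part of the offset is in bytes as well, so it
    * gets the same conversion to dwords before being folded into
    * load_const_ir3's base.
    */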
   assert(!(const_offset & 0x3));
   const_offset >>= 2;

   const int range_offset = ((int)range->offset - (int)range->start) / 4;
   const_offset += range_offset;

   /* The range_offset could be negative: if only part of the UBO
    * block is accessed, range->start can be greater than range->offset.
    * But we can't underflow const_offset. If necessary we need to
    * insert nir instructions to compensate (which can hopefully be
    * optimized away)
    */
   if (const_offset < 0) {
      uniform_offset = nir_iadd_imm(b, uniform_offset, const_offset);
      const_offset = 0;
   }

   nir_def *uniform =
      nir_load_const_ir3(b, instr->num_components, instr->def.bit_size,
                         uniform_offset, .base = const_offset);

   nir_def_replace(&instr->def, uniform);

   return true;
}

static bool
rematerialize_load_global_bases(nir_shader *nir,
                                struct ir3_ubo_analysis_state *state)
{
   bool has_load_global = false;
   for (unsigned i = 0; i < state->num_enabled; i++) {
      if (state->range[i].ubo.global) {
         has_load_global = true;
         break;
      }
   }

   if (!has_load_global)
      return false;

   nir_function_impl *preamble = nir_shader_get_preamble(nir);
   nir_builder _b = nir_builder_at(nir_after_impl(preamble));
   nir_builder *b = &_b;

   for (unsigned i = 0; i < state->num_enabled; i++) {
      struct ir3_ubo_range *range = &state->range[i];

      if (!range->ubo.global)
         continue;

      range->ubo.global_base =
         ir3_rematerialize_def_for_preamble(b, range->ubo.global_base, NULL,
                                            NULL);
   }

   return true;
}

static bool
copy_global_to_uniform(nir_shader *nir, struct ir3_ubo_analysis_state *state)
{
   if (state->num_enabled == 0)
      return false;

   nir_function_impl *preamble = nir_shader_get_preamble(nir);
   nir_builder _b = nir_builder_at(nir_after_impl(preamble));
   nir_builder *b = &_b;

   for (unsigned i = 0; i < state->num_enabled; i++) {
      const struct ir3_ubo_range *range = &state->range[i];
      assert(range->ubo.global);

      nir_def *base =
         ir3_rematerialize_def_for_preamble(b, range->ubo.global_base, NULL,
                                            NULL);
      unsigned start = range->start;
      if (start > (1 << 10)) {
         /* This is happening pretty late, so we need to add the offset
          * manually ourselves.
          */
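         /* Presumably the copy's offset immediate can't encode values this
          * large, so fold the start offset into the 64-bit base address
          * instead, adding the carry into the high half by hand.
          */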
         nir_def *start_val = nir_imm_int(b, start);
         nir_def *base_lo = nir_channel(b, base, 0);
         nir_def *base_hi = nir_channel(b, base, 1);
         base_lo = nir_iadd(b, base_lo, start_val);
         nir_def *carry = nir_b2i32(b, nir_ult(b, base_lo, start_val));
         base_hi = nir_iadd(b, base_hi, carry);
         base = nir_vec2(b, base_lo, base_hi);
         start = 0;
      }

      unsigned size = (range->end - range->start);
      for (unsigned offset = 0; offset < size; offset += 16) {
         unsigned const_offset = range->offset / 4 + offset / 4;
         if (const_offset < 256) {
            nir_copy_global_to_uniform_ir3(b, base,
                                           .base = start + offset,
                                           .range_base = const_offset,
                                           .range = 1);
         } else {
            /* It seems that the a1.x format doesn't work, so we need to
             * decompose the ldg.k into ldg + stc.
             */
            nir_def *load =
               nir_load_global_ir3(b, 4, 32, base,
                                   nir_imm_int(b, (start + offset) / 4));
            nir_store_const_ir3(b, load, .base = const_offset);
         }
      }
   }

   return true;
}

static bool
copy_ubo_to_uniform(nir_shader *nir, const struct ir3_const_state *const_state,
                    bool const_data_via_cp)
{
   const struct ir3_ubo_analysis_state *state = &const_state->ubo_state;

   if (state->num_enabled == 0)
      return false;

   if (state->num_enabled == 1 &&
       !state->range[0].ubo.bindless &&
       state->range[0].ubo.block == const_state->consts_ubo.idx &&
       const_data_via_cp)
      return false;

   nir_function_impl *preamble = nir_shader_get_preamble(nir);
   nir_builder _b = nir_builder_at(nir_after_impl(preamble));
   nir_builder *b = &_b;

   for (unsigned i = 0; i < state->num_enabled; i++) {
      const struct ir3_ubo_range *range = &state->range[i];

      /* The constant_data UBO is pushed in a different path from normal
       * uniforms, and the state is setup earlier so it makes more sense to let
       * the CP do it for us.
       */
      if (!range->ubo.bindless &&
          range->ubo.block == const_state->consts_ubo.idx &&
          const_data_via_cp)
         continue;

      nir_def *ubo = nir_imm_int(b, range->ubo.block);
      if (range->ubo.bindless) {
         ubo = nir_bindless_resource_ir3(b, 32, ubo,
                                         .desc_set = range->ubo.bindless_base);
      }

      /* ldc.k has a range of only 256, but there are 512 vec4 constants.
       * Therefore we may have to split a large copy in two.
       */
      unsigned size = (range->end - range->start) / 16;
      for (unsigned offset = 0; offset < size; offset += 256) {
         nir_copy_ubo_to_uniform_ir3(b, ubo,
                                     nir_imm_int(b, range->start / 16 + offset),
                                     .base = range->offset / 4 + offset * 4,
                                     .range = MIN2(size - offset, 256));
      }
   }

   return true;
}

static bool
instr_is_load_ubo(nir_instr *instr)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_op op = nir_instr_as_intrinsic(instr)->intrinsic;

   /* nir_lower_ubo_vec4 happens after this pass. */
   assert(op != nir_intrinsic_load_ubo_vec4);

   return op == nir_intrinsic_load_ubo;
}

static bool
instr_is_load_const(nir_instr *instr)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
   nir_intrinsic_op op = intrin->intrinsic;

   if (op != nir_intrinsic_load_global_ir3)
      return false;

   /* TODO handle non-aligned accesses */
   if (nir_intrinsic_align_mul(intrin) < 16 ||
       nir_intrinsic_align_offset(intrin) % 16 != 0)
      return false;

   enum gl_access_qualifier access = nir_intrinsic_access(intrin);
   return (access & ACCESS_NON_WRITEABLE) && (access & ACCESS_CAN_SPECULATE);
}

/* For now, everything we upload is accessed statically and thus will be
 * used by the shader. Once we can upload dynamically indexed data, we may
 * upload sparsely accessed arrays, at which point we probably want to
 * give priority to smaller UBOs, on the assumption that big UBOs will be
 * accessed dynamically. Alternatively, we can track statically and
 * dynamically accessed ranges separately and upload static ranges
 * first.
 */
static void
assign_offsets(struct ir3_ubo_analysis_state *state, unsigned start,
               unsigned max_upload)
{
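   /* Range starts/ends (and therefore "start" and the resulting offsets) are
    * all in bytes; callers convert to vec4 slots or dwords as needed.
    */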
   uint32_t offset = 0;
   for (uint32_t i = 0; i < state->num_enabled; i++) {
      uint32_t range_size = state->range[i].end - state->range[i].start;

      assert(offset <= max_upload);
      state->range[i].offset = offset + start;
      assert(offset <= max_upload);
      offset += range_size;
   }
   state->size = offset;
}

/* Lowering ldg to ldg.k + const uses the same infrastructure as lowering UBO
 * loads, but must be done separately because the analysis and transform must be
 * done in the same pass and we cannot reuse the main variant analysis for the
 * binning variant.
 */
bool
ir3_nir_lower_const_global_loads(nir_shader *nir, struct ir3_shader_variant *v)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);
   struct ir3_compiler *compiler = v->compiler;

   if (ir3_shader_debug & IR3_DBG_NOUBOOPT)
      return false;

   unsigned max_upload;
   if (v->binning_pass) {
      max_upload = const_state->global_size * 16;
   } else {
      struct ir3_const_state worst_case_const_state = {
         .preamble_size = const_state->preamble_size,
      };
      ir3_setup_const_state(nir, v, &worst_case_const_state);
      max_upload =
         ir3_const_state_get_free_space(v, &worst_case_const_state) * 16;
   }

   struct ir3_ubo_analysis_state state = {};
   uint32_t upload_remaining = max_upload;

   nir_foreach_function (function, nir) {
      if (function->impl && !function->is_preamble) {
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr (instr, block) {
               if (instr_is_load_const(instr) &&
                   ir3_def_is_rematerializable_for_preamble(
                      nir_instr_as_intrinsic(instr)->src[0].ssa, NULL))
                  gather_ubo_ranges(nir, nir_instr_as_intrinsic(instr), &state,
                                    compiler->const_upload_unit,
                                    &upload_remaining);
            }
         }
      }
   }

   uint32_t global_offset = v->shader_options.num_reserved_user_consts * 16;
   assign_offsets(&state, global_offset, max_upload);

   bool progress = copy_global_to_uniform(nir, &state);

   if (progress) {
      nir_foreach_function (function, nir) {
         if (function->impl) {
            if (function->is_preamble) {
               nir_metadata_preserve(function->impl, nir_metadata_all);
               continue;
            }

            nir_builder builder = nir_builder_create(function->impl);
            nir_foreach_block (block, function->impl) {
               nir_foreach_instr_safe (instr, block) {
                  if (!instr_is_load_const(instr))
                     continue;
                  progress |= lower_ubo_load_to_uniform(
                     nir_instr_as_intrinsic(instr), &builder, &state, NULL,
                     compiler->const_upload_unit);
               }
            }

            nir_metadata_preserve(function->impl, nir_metadata_control_flow);
         }
      }
   }

   if (!v->binning_pass)
      ir3_const_state_mut(v)->global_size = DIV_ROUND_UP(state.size, 16);

   return progress;
}

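/*
 * First pass: decide which UBO ranges are worth uploading to the const file,
 * within the const space left after reserved user consts and const global
 * ranges.  The loads themselves are rewritten later, in
 * ir3_nir_lower_ubo_loads().
 */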
void
ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_const_state *const_state = ir3_const_state_mut(v);
   struct ir3_ubo_analysis_state *state = &const_state->ubo_state;
   struct ir3_compiler *compiler = v->compiler;

   /* Limit our uploads to the amount of constant buffer space available in
    * the hardware, minus what the shader compiler may need for various
    * driver params. We do this UBO-to-push-constant lowering before the real
    * allocation of the driver params' const space, because UBO pointers can
    * be driver params but this pass usually eliminates them.
    */
   struct ir3_const_state worst_case_const_state = {
      .preamble_size = const_state->preamble_size,
      .global_size = const_state->global_size,
   };
   ir3_setup_const_state(nir, v, &worst_case_const_state);
   const uint32_t max_upload =
      ir3_const_state_get_free_space(v, &worst_case_const_state) * 16;

   memset(state, 0, sizeof(*state));

   if (ir3_shader_debug & IR3_DBG_NOUBOOPT)
      return;

   uint32_t upload_remaining = max_upload;
   bool push_ubos = compiler->options.push_ubo_with_preamble;

   nir_foreach_function (function, nir) {
      if (function->impl && (!push_ubos || !function->is_preamble)) {
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr (instr, block) {
               if (instr_is_load_ubo(instr))
                  gather_ubo_ranges(nir, nir_instr_as_intrinsic(instr), state,
                                    compiler->const_upload_unit,
                                    &upload_remaining);
            }
         }
      }
   }

   uint32_t ubo_offset = v->shader_options.num_reserved_user_consts * 16 +
                         const_state->global_size * 16;
   assign_offsets(state, ubo_offset, max_upload);
}

bool
ir3_nir_lower_ubo_loads(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_compiler *compiler = v->compiler;
   /* For the binning pass variant, we re-use the corresponding draw-pass
    * variant's const_state and ubo state. To make these clear, in this
    * pass it is const (read-only)
    */
   const struct ir3_const_state *const_state = ir3_const_state(v);
   const struct ir3_ubo_analysis_state *state = &const_state->ubo_state;

   int num_ubos = 0;
   bool progress = false;
   bool has_preamble = false;
   bool push_ubos = compiler->options.push_ubo_with_preamble;
   nir_foreach_function (function, nir) {
      if (function->impl) {
         if (function->is_preamble && push_ubos) {
            has_preamble = true;
            nir_metadata_preserve(function->impl, nir_metadata_all);
            continue;
         }
         nir_builder builder = nir_builder_create(function->impl);
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr_safe (instr, block) {
               if (!instr_is_load_ubo(instr))
                  continue;
               progress |= lower_ubo_load_to_uniform(
                  nir_instr_as_intrinsic(instr), &builder, state, &num_ubos,
                  compiler->const_upload_unit);
            }
         }

         nir_metadata_preserve(function->impl, nir_metadata_control_flow);
      }
   }
   /* Update the num_ubos field for GL (first_ubo_is_default_ubo). With
    * Vulkan's bindless, we don't use the num_ubos field, so we can leave it
    * incremented.
    */
   if (nir->info.first_ubo_is_default_ubo && !push_ubos && !has_preamble)
      nir->info.num_ubos = num_ubos;

   if (compiler->has_preamble && push_ubos)
      progress |= copy_ubo_to_uniform(
         nir, const_state, !compiler->load_shader_consts_via_preamble);

   return progress;
}

static bool
fixup_load_const_ir3_filter(const nir_instr *instr, const void *arg)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;
   return nir_instr_as_intrinsic(instr)->intrinsic ==
          nir_intrinsic_load_const_ir3;
}

static nir_def *
fixup_load_const_ir3_instr(struct nir_builder *b, nir_instr *instr, void *arg)
{
   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

   /* We don't need to worry about the non-indirect case: */
   if (nir_src_is_const(intr->src[0]))
      return NULL;

   const unsigned base_offset_limit = (1 << 9); /* 9 bits */
   unsigned base_offset = nir_intrinsic_base(intr);

   /* Or cases where the base offset is lower than the hw limit: */
   if (base_offset < base_offset_limit)
      return NULL;

   b->cursor = nir_before_instr(instr);

   nir_def *offset = intr->src[0].ssa;

   /* We'd like to avoid a sequence like:
    *
    *    vec4 32 ssa_18 = intrinsic load_const_ir3 (ssa_4) (1024, 0, 0)
    *    vec4 32 ssa_19 = intrinsic load_const_ir3 (ssa_4) (1072, 0, 0)
    *    vec4 32 ssa_20 = intrinsic load_const_ir3 (ssa_4) (1120, 0, 0)
    *
    * From turning into a unique offset value (which requires reloading
    * a0.x for each instruction). So instead of just adding the constant
    * base_offset to the non-const offset, be a bit more clever and only
    * extract the part that cannot be encoded. Afterwards CSE should
    * turn the result into:
    *
    *    vec1 32 ssa_5 = load_const (1024)
    *    vec4 32 ssa_6 = iadd ssa_4, ssa_5
    *    vec4 32 ssa_18 = intrinsic load_const_ir3 (ssa_6) (0, 0, 0)
    *    vec4 32 ssa_19 = intrinsic load_const_ir3 (ssa_6) (48, 0, 0)
    *    vec4 32 ssa_20 = intrinsic load_const_ir3 (ssa_6) (96, 0, 0)
    */
   unsigned new_base_offset = base_offset % base_offset_limit;

   nir_intrinsic_set_base(intr, new_base_offset);
   offset = nir_iadd_imm(b, offset, base_offset - new_base_offset);

   nir_src_rewrite(&intr->src[0], offset);

   return NIR_LOWER_INSTR_PROGRESS;
}

/**
 * For relative CONST file access, we can only encode 10b worth of fixed offset,
 * so in cases where the base offset is larger, we need to peel it out into
 * ALU instructions.
 *
 * This should run late, after constant folding has had a chance to do its
 * thing, so we can actually know if it is an indirect uniform offset or not.
 */
bool
ir3_nir_fixup_load_const_ir3(nir_shader *nir)
{
   return nir_shader_lower_instructions(nir, fixup_load_const_ir3_filter,
                                        fixup_load_const_ir3_instr, NULL);
}

static nir_def *
ir3_nir_lower_load_const_instr(nir_builder *b, nir_instr *in_instr, void *data)
{
   struct ir3_shader_variant *v = data;
   nir_intrinsic_instr *instr = nir_instr_as_intrinsic(in_instr);

   unsigned num_components = instr->num_components;
   unsigned bit_size = instr->def.bit_size;
   if (instr->def.bit_size == 16) {
      /* We can't do 16b loads -- either from LDC (32-bit only in any of our
       * traces, and disasm that doesn't look like it really supports it) or
       * from the constant file (where CONSTANT_DEMOTION_ENABLE means we get
       * automatic 32b-to-16b conversions when we ask for 16b from it).
       * Instead, we'll load 32b from a UBO and unpack from there.
       */
      num_components = DIV_ROUND_UP(num_components, 2);
      bit_size = 32;
   }
   unsigned base = nir_intrinsic_base(instr);
   nir_def *index = ir3_get_driver_consts_ubo(b, v);
   nir_def *offset =
      nir_iadd_imm(b, instr->src[0].ssa, base);

   nir_def *result =
      nir_load_ubo(b, num_components, bit_size, index, offset,
                   .align_mul = nir_intrinsic_align_mul(instr),
                   .align_offset = nir_intrinsic_align_offset(instr),
                   .range_base = base, .range = nir_intrinsic_range(instr));

   if (instr->def.bit_size == 16) {
      result = nir_bitcast_vector(b, result, 16);
      result = nir_trim_vector(b, result, instr->num_components);
   }

   return result;
}

static bool
ir3_lower_load_const_filter(const nir_instr *instr, const void *data)
{
   return (instr->type == nir_instr_type_intrinsic &&
           nir_instr_as_intrinsic(instr)->intrinsic ==
              nir_intrinsic_load_constant);
}

/* Lowers load_constant intrinsics to UBO accesses so we can run them through
 * the general "upload to const file or leave as UBO access" code.
 */
bool
ir3_nir_lower_load_constant(nir_shader *nir, struct ir3_shader_variant *v)
{
   bool progress = nir_shader_lower_instructions(
      nir, ir3_lower_load_const_filter, ir3_nir_lower_load_const_instr,
      v);

   if (progress) {
      struct ir3_compiler *compiler = v->compiler;

      /* Save a copy of the NIR constant data to the variant for
       * inclusion in the final assembly.
       */
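      /* Pad to the const upload granularity; const_upload_unit appears to be
       * counted in vec4s (16 bytes each), matching the "alignment * 16" used
       * when computing ranges above.
       */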
      v->constant_data_size =
         align(nir->constant_data_size,
               compiler->const_upload_unit * 4 * sizeof(uint32_t));
      v->constant_data = rzalloc_size(v, v->constant_data_size);
      memcpy(v->constant_data, nir->constant_data, nir->constant_data_size);
   }

   return progress;
}