xref: /aosp_15_r20/external/mesa3d/src/intel/compiler/brw_fs_reg_allocate.cpp (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <[email protected]>
25  *
26  */
27 
28 #include "brw_eu.h"
29 #include "brw_fs.h"
30 #include "brw_fs_builder.h"
31 #include "brw_cfg.h"
32 #include "util/set.h"
33 #include "util/register_allocate.h"
34 
35 using namespace brw;
36 
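/* Rewrite a VGRF reference to the hardware GRF chosen by the allocator:
 * reg_hw_locations[] maps VGRF numbers to allocated locations, and any
 * whole-register part of the byte offset is folded into the GRF number so
 * that only a sub-register offset remains.
 */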
37 static void
38 assign_reg(const struct intel_device_info *devinfo,
39            unsigned *reg_hw_locations, brw_reg *reg)
40 {
41    if (reg->file == VGRF) {
42       reg->nr = reg_unit(devinfo) * reg_hw_locations[reg->nr] + reg->offset / REG_SIZE;
43       reg->offset %= REG_SIZE;
44    }
45 }
46 
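/* Trivial allocator: place each VGRF back to back immediately after the
 * shader payload, without liveness information or packing, and fail if the
 * result does not fit in BRW_MAX_GRF registers.
 */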
47 void
48 brw_assign_regs_trivial(fs_visitor &s)
49 {
50    const struct intel_device_info *devinfo = s.devinfo;
51    unsigned hw_reg_mapping[s.alloc.count + 1];
52    unsigned i;
53    int reg_width = s.dispatch_width / 8;
54 
55    /* Note that compressed instructions require alignment to 2 registers. */
56    hw_reg_mapping[0] = ALIGN(s.first_non_payload_grf, reg_width);
57    for (i = 1; i <= s.alloc.count; i++) {
58       hw_reg_mapping[i] = (hw_reg_mapping[i - 1] +
59                            DIV_ROUND_UP(s.alloc.sizes[i - 1],
60                                         reg_unit(devinfo)));
61    }
62    s.grf_used = hw_reg_mapping[s.alloc.count];
63 
64    foreach_block_and_inst(block, fs_inst, inst, s.cfg) {
65       assign_reg(devinfo, hw_reg_mapping, &inst->dst);
66       for (i = 0; i < inst->sources; i++) {
67          assign_reg(devinfo, hw_reg_mapping, &inst->src[i]);
68       }
69    }
70 
71    if (s.grf_used >= BRW_MAX_GRF) {
72       s.fail("Ran out of regs on trivial allocator (%d/%d)\n",
73 	     s.grf_used, BRW_MAX_GRF);
74    } else {
75       s.alloc.count = s.grf_used;
76    }
77 
78 }
79 
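/* Build the register set shared by all FS compilations: one register class
 * per contiguous allocation size, from 1 to REG_CLASS_COUNT registers.
 */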
80 extern "C" void
81 brw_fs_alloc_reg_sets(struct brw_compiler *compiler)
82 {
83    const struct intel_device_info *devinfo = compiler->devinfo;
84    int base_reg_count = BRW_MAX_GRF;
85 
86    /* Almost all of the values handled in the compiler are scalar values
87     * occupying a single register (or 2 registers in the
88     * case of SIMD16, which is handled by dividing base_reg_count by 2 and
89     * multiplying allocated register numbers by 2).  Things that were
90     * aggregates of scalar values at the GLSL level were split to scalar
91     * values by split_virtual_grfs().
92     *
93     * However, texture SEND messages return a series of contiguous registers
94     * to write into.  We currently always ask for 4 registers, but we may
95     * convert that to use less some day.
96     *
97     * Additionally, on gfx5 we need aligned pairs of registers for the PLN
98     * instruction, and on gfx4 we need 8 contiguous regs for workaround simd16
99     * texturing.
100     */
101    assert(REG_CLASS_COUNT == MAX_VGRF_SIZE(devinfo) / reg_unit(devinfo));
102    int class_sizes[REG_CLASS_COUNT];
103    for (unsigned i = 0; i < REG_CLASS_COUNT; i++)
104       class_sizes[i] = i + 1;
105 
106    struct ra_regs *regs = ra_alloc_reg_set(compiler, BRW_MAX_GRF, false);
107    ra_set_allocate_round_robin(regs);
108    struct ra_class **classes = ralloc_array(compiler, struct ra_class *,
109                                             REG_CLASS_COUNT);
110 
111    /* Now, make the register classes for each size of contiguous register
112     * allocation we might need to make.
113     */
114    for (int i = 0; i < REG_CLASS_COUNT; i++) {
115       classes[i] = ra_alloc_contig_reg_class(regs, class_sizes[i]);
116 
117       for (int reg = 0; reg <= base_reg_count - class_sizes[i]; reg++)
118          ra_class_add_reg(classes[i], reg);
119    }
120 
121    ra_set_finalize(regs, NULL);
122 
123    compiler->fs_reg_set.regs = regs;
124    for (unsigned i = 0; i < ARRAY_SIZE(compiler->fs_reg_set.classes); i++)
125       compiler->fs_reg_set.classes[i] = NULL;
126    for (int i = 0; i < REG_CLASS_COUNT; i++)
127       compiler->fs_reg_set.classes[class_sizes[i] - 1] = classes[i];
128 }
129 
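/* Return the IP of the WHILE instruction that closes the loop opened by the
 * DO at the start of the given block, walking forward while tracking loop
 * nesting depth.
 */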
130 static int
131 count_to_loop_end(const bblock_t *block)
132 {
133    if (block->end()->opcode == BRW_OPCODE_WHILE)
134       return block->end_ip;
135 
136    int depth = 1;
137    /* Skip the first block, since we don't want to count the DO that the
138     * calling function found.
139     */
140    for (block = block->next();
141         depth > 0;
142         block = block->next()) {
143       if (block->start()->opcode == BRW_OPCODE_DO)
144          depth++;
145       if (block->end()->opcode == BRW_OPCODE_WHILE) {
146          depth--;
147          if (depth == 0)
148             return block->end_ip;
149       }
150    }
151    unreachable("not reached");
152 }
153 
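/* Compute, for each payload register node, the IP of its last use (or -1 if
 * unused).  Uses inside a loop are extended to the end of the outermost loop,
 * since the payload is only written at thread dispatch.
 */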
154 void fs_visitor::calculate_payload_ranges(bool allow_spilling,
155                                           unsigned payload_node_count,
156                                           int *payload_last_use_ip) const
157 {
158    int loop_depth = 0;
159    int loop_end_ip = 0;
160 
161    for (unsigned i = 0; i < payload_node_count; i++)
162       payload_last_use_ip[i] = -1;
163 
164    int ip = 0;
165    foreach_block_and_inst(block, fs_inst, inst, cfg) {
166       switch (inst->opcode) {
167       case BRW_OPCODE_DO:
168          loop_depth++;
169 
170          /* Since payload regs are defined only at the start of the shader
171           * execution, any uses of the payload within a loop mean the live
172           * interval extends to the end of the outermost loop.  Find the ip of
173           * the end now.
174           */
175          if (loop_depth == 1)
176             loop_end_ip = count_to_loop_end(block);
177          break;
178       case BRW_OPCODE_WHILE:
179          loop_depth--;
180          break;
181       default:
182          break;
183       }
184 
185       int use_ip;
186       if (loop_depth > 0)
187          use_ip = loop_end_ip;
188       else
189          use_ip = ip;
190 
191       /* Note that UNIFORM args have been turned into FIXED_GRF by
192        * assign_curbe_setup(), and interpolation uses fixed hardware regs from
193        * the start (see interp_reg()).
194        */
195       for (int i = 0; i < inst->sources; i++) {
196          if (inst->src[i].file == FIXED_GRF) {
197             unsigned reg_nr = inst->src[i].nr;
198             if (reg_nr / reg_unit(devinfo) >= payload_node_count)
199                continue;
200 
201             for (unsigned j = reg_nr / reg_unit(devinfo);
202                  j < DIV_ROUND_UP(reg_nr + regs_read(inst, i),
203                                   reg_unit(devinfo));
204                  j++) {
205                payload_last_use_ip[j] = use_ip;
206                assert(j < payload_node_count);
207             }
208          }
209       }
210 
211       if (inst->dst.file == FIXED_GRF) {
212          unsigned reg_nr = inst->dst.nr;
213          if (reg_nr / reg_unit(devinfo) < payload_node_count) {
214             for (unsigned j = reg_nr / reg_unit(devinfo);
215                  j < DIV_ROUND_UP(reg_nr + regs_written(inst),
216                                   reg_unit(devinfo));
217                  j++) {
218                payload_last_use_ip[j] = use_ip;
219                assert(j < payload_node_count);
220             }
221          }
222       }
223 
224       /* The generator implicitly uses g0 to construct extended message
225        * descriptors for scratch send messages when this bit is set.
226        */
227       if (inst->send_ex_desc_scratch)
228          payload_last_use_ip[0] = use_ip;
229 
230       ip++;
231    }
232 
233    /* g0 is needed to construct scratch headers for spilling.  While we could
234     * extend its live range each time we spill a register, and update the
235     * interference graph accordingly, this would get pretty messy.  Instead,
236     * simply consider g0 live for the whole program if spilling is required.
237     */
238    if (allow_spilling)
239       payload_last_use_ip[0] = ip - 1;
240 }
241 
242 class fs_reg_alloc {
243 public:
244    fs_reg_alloc(fs_visitor *fs):
245       fs(fs), devinfo(fs->devinfo), compiler(fs->compiler),
246       live(fs->live_analysis.require()), g(NULL),
247       have_spill_costs(false)
248    {
249       mem_ctx = ralloc_context(NULL);
250 
251       /* Stash the number of instructions so we can sanity check that our
252        * counts still match liveness.
253        */
254       live_instr_count = fs->cfg->last_block()->end_ip + 1;
255 
256       spill_insts = _mesa_pointer_set_create(mem_ctx);
257 
258       /* Most of this allocation was written for a reg_width of 1
259        * (dispatch_width == 8).  In extending to SIMD16, the code was
260        * left in place and it was converted to have the hardware
261        * registers it's allocating be contiguous physical pairs of regs
262        * for reg_width == 2.
263        */
264       int reg_width = fs->dispatch_width / 8;
265       payload_node_count = ALIGN(fs->first_non_payload_grf, reg_width);
266 
267       /* Get payload IP information */
268       payload_last_use_ip = ralloc_array(mem_ctx, int, payload_node_count);
269 
270       node_count = 0;
271       first_payload_node = 0;
272       grf127_send_hack_node = 0;
273       first_vgrf_node = 0;
274       last_vgrf_node = 0;
275       first_spill_node = 0;
276 
277       spill_vgrf_ip = NULL;
278       spill_vgrf_ip_alloc = 0;
279       spill_node_count = 0;
280    }
281 
282    ~fs_reg_alloc()
283    {
284       ralloc_free(mem_ctx);
285    }
286 
287    bool assign_regs(bool allow_spilling, bool spill_all);
288 
289 private:
290    void setup_live_interference(unsigned node,
291                                 int node_start_ip, int node_end_ip);
292    void setup_inst_interference(const fs_inst *inst);
293 
294    void build_interference_graph(bool allow_spilling);
295 
296    brw_reg build_lane_offsets(const fs_builder &bld,
297                              uint32_t spill_offset, int ip);
298    brw_reg build_single_offset(const fs_builder &bld,
299                               uint32_t spill_offset, int ip);
300    brw_reg build_legacy_scratch_header(const fs_builder &bld,
301                                        uint32_t spill_offset, int ip);
302 
303    void emit_unspill(const fs_builder &bld, struct shader_stats *stats,
304                      brw_reg dst, uint32_t spill_offset, unsigned count, int ip);
305    void emit_spill(const fs_builder &bld, struct shader_stats *stats,
306                    brw_reg src, uint32_t spill_offset, unsigned count, int ip);
307 
308    void set_spill_costs();
309    int choose_spill_reg();
310    brw_reg alloc_spill_reg(unsigned size, int ip);
311    void spill_reg(unsigned spill_reg);
312 
313    void *mem_ctx;
314    fs_visitor *fs;
315    const intel_device_info *devinfo;
316    const brw_compiler *compiler;
317    const fs_live_variables &live;
318    int live_instr_count;
319 
320    set *spill_insts;
321 
322    ra_graph *g;
323    bool have_spill_costs;
324 
325    int payload_node_count;
326    int *payload_last_use_ip;
327 
328    int node_count;
329    int first_payload_node;
330    int grf127_send_hack_node;
331    int first_vgrf_node;
332    int last_vgrf_node;
333    int first_spill_node;
334 
335    int *spill_vgrf_ip;
336    int spill_vgrf_ip_alloc;
337    int spill_node_count;
338 };
339 
340 namespace {
341    /**
342     * Maximum spill block size we expect to encounter in 32B units.
343     *
344     * This is somewhat arbitrary and doesn't necessarily limit the maximum
345     * variable size that can be spilled -- A higher value will allow a
346     * variable of a given size to be spilled more efficiently with a smaller
347     * number of scratch messages, but will increase the likelihood of a
348     * collision between the MRFs reserved for spilling and other MRFs used by
349     * the program (and possibly increase GRF register pressure on platforms
350     * without hardware MRFs), which could cause register allocation to fail.
351     *
352     * For the moment reserve just enough space so a register of 32 bit
353     * component type and natural region width can be spilled without splitting
354     * into multiple (force_writemask_all) scratch messages.
355     */
356    unsigned
357    spill_max_size(const fs_visitor *s)
358    {
359       /* LSC is limited to SIMD16 sends */
360       if (s->devinfo->has_lsc)
361          return 2;
362 
363       /* FINISHME - On Gfx7+ it should be possible to avoid this limit
364        *            altogether by spilling directly from the temporary GRF
365        *            allocated to hold the result of the instruction (and the
366        *            scratch write header).
367        */
368       /* FINISHME - The shader's dispatch width probably belongs in
369        *            backend_shader (or some nonexistent fs_shader class?)
370        *            rather than in the visitor class.
371        */
372       return s->dispatch_width / 8;
373    }
374 }
375 
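/* Add interference between the given RA node, live over
 * [node_start_ip, node_end_ip], and any payload node or lower-numbered VGRF
 * node whose live range overlaps it.
 */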
376 void
377 fs_reg_alloc::setup_live_interference(unsigned node,
378                                       int node_start_ip, int node_end_ip)
379 {
380    /* Mark any virtual grf that is live between the start of the program and
381     * the last use of a payload node interfering with that payload node.
382     */
383    for (int i = 0; i < payload_node_count; i++) {
384       if (payload_last_use_ip[i] == -1)
385          continue;
386 
387       /* Note that we use a <= comparison, unlike vgrfs_interfere(),
388        * in order to not have to worry about the uniform issue described in
389        * calculate_live_intervals().
390        */
391       if (node_start_ip <= payload_last_use_ip[i])
392          ra_add_node_interference(g, node, first_payload_node + i);
393    }
394 
395    /* Add interference with every vgrf whose live range intersects this
396     * node's.  We only need to look at nodes below this one as the reflexivity
397     * of interference will take care of the rest.
398     */
399    for (unsigned n2 = first_vgrf_node;
400         n2 <= (unsigned)last_vgrf_node && n2 < node; n2++) {
401       unsigned vgrf = n2 - first_vgrf_node;
402       if (!(node_end_ip <= live.vgrf_start[vgrf] ||
403             live.vgrf_end[vgrf] <= node_start_ip))
404          ra_add_node_interference(g, node, n2);
405    }
406 }
407 
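/* Add interferences implied by an individual instruction rather than by live
 * ranges: source/destination hazards, SEND payload restrictions, and the
 * fixed high-GRF placement of EOT message payloads.
 */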
408 void
409 fs_reg_alloc::setup_inst_interference(const fs_inst *inst)
410 {
411    /* Certain instructions can't safely use the same register for their
412     * sources and destination.  Add interference.
413     */
414    if (inst->dst.file == VGRF && inst->has_source_and_destination_hazard()) {
415       for (unsigned i = 0; i < inst->sources; i++) {
416          if (inst->src[i].file == VGRF) {
417             ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
418                                         first_vgrf_node + inst->src[i].nr);
419          }
420       }
421    }
422 
423    /* A compressed instruction is actually two instructions executed
424     * simultaneously.  On most platforms, it's ok to have the source and
425     * destination registers be the same.  In this case, each instruction
426     * over-writes its own source and there's no problem.  The real problem
427     * here is if the source and destination registers are off by one.  Then
428     * you can end up in a scenario where the first instruction over-writes the
429     * source of the second instruction.  Since the compiler doesn't know about
430     * this level of granularity, we simply make the source and destination
431     * interfere.
432     */
433    if (inst->dst.component_size(inst->exec_size) > REG_SIZE &&
434        inst->dst.file == VGRF) {
435       for (int i = 0; i < inst->sources; ++i) {
436          if (inst->src[i].file == VGRF) {
437             ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
438                                         first_vgrf_node + inst->src[i].nr);
439          }
440       }
441    }
442 
443    if (grf127_send_hack_node >= 0) {
444       /* In the Intel Broadwell PRM, vol 07, section "Instruction Set Reference",
445        * subsection "EUISA Instructions", Send Message (page 990):
446        *
447        * "r127 must not be used for return address when there is a src and
448        * dest overlap in send instruction."
449        *
450        * We avoid using grf127 as part of the destination of send messages
451        * by adding a node interference to the grf127_send_hack_node.
452        * This node has a fixed assignment to grf127.
453        *
454        * We don't apply it to SIMD16 instructions because previous code avoids
455        * any register overlap between sources and destination.
456        */
457       if (inst->exec_size < 16 && inst->is_send_from_grf() &&
458           inst->dst.file == VGRF)
459          ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
460                                      grf127_send_hack_node);
461    }
462 
463    /* From the Skylake PRM Vol. 2a docs for sends:
464     *
465     *    "It is required that the second block of GRFs does not overlap with
466     *    the first block."
467     *
468     * Normally, this is taken care of by fixup_sends_duplicate_payload() but
469     * in the case where one of the registers is an undefined value, the
470     * register allocator may decide that they don't interfere even though
471     * they're used as sources in the same instruction.  We also need to add
472     * interference here.
473     */
474    if (inst->opcode == SHADER_OPCODE_SEND && inst->ex_mlen > 0 &&
475        inst->src[2].file == VGRF && inst->src[3].file == VGRF &&
476        inst->src[2].nr != inst->src[3].nr)
477       ra_add_node_interference(g, first_vgrf_node + inst->src[2].nr,
478                                   first_vgrf_node + inst->src[3].nr);
479 
480    /* When we do send-from-GRF for FB writes, we need to ensure that the last
481     * write instruction sends from a high register.  This is because the
482     * vertex fetcher wants to start filling the low payload registers while
483     * the pixel data port is still working on writing out the memory.  If we
484     * don't do this, we get rendering artifacts.
485     *
486     * We could just do "something high".  Instead, we just pick the highest
487     * register that works.
488     */
489    if (inst->eot) {
490       const int vgrf = inst->opcode == SHADER_OPCODE_SEND ?
491                        inst->src[2].nr : inst->src[0].nr;
492       const int size = DIV_ROUND_UP(fs->alloc.sizes[vgrf], reg_unit(devinfo));
493       int reg = BRW_MAX_GRF - size;
494 
495       if (grf127_send_hack_node >= 0) {
496          /* Avoid r127 which might be unusable if the node was previously
497           * written by a SIMD8 SEND message with source/destination overlap.
498           */
499          reg--;
500       }
501 
502       assert(reg >= 112);
503       ra_set_node_reg(g, first_vgrf_node + vgrf, reg);
504 
505       if (inst->ex_mlen > 0) {
506          const int vgrf = inst->src[3].nr;
507          reg -= DIV_ROUND_UP(fs->alloc.sizes[vgrf], reg_unit(devinfo));
508          assert(reg >= 112);
509          ra_set_node_reg(g, first_vgrf_node + vgrf, reg);
510       }
511    }
512 }
513 
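/* RA node layout: payload nodes first, then the grf127 hack node, then one
 * node per VGRF; spill temporaries allocated later are appended starting at
 * first_spill_node.
 */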
514 void
515 fs_reg_alloc::build_interference_graph(bool allow_spilling)
516 {
517    /* Compute the RA node layout */
518    node_count = 0;
519    first_payload_node = node_count;
520    node_count += payload_node_count;
521 
522    grf127_send_hack_node = node_count;
523    node_count++;
524 
525    first_vgrf_node = node_count;
526    node_count += fs->alloc.count;
527    last_vgrf_node = node_count - 1;
528    first_spill_node = node_count;
529 
530    fs->calculate_payload_ranges(allow_spilling, payload_node_count,
531                                 payload_last_use_ip);
532 
533    assert(g == NULL);
534    g = ra_alloc_interference_graph(compiler->fs_reg_set.regs, node_count);
535    ralloc_steal(mem_ctx, g);
536 
537    /* Set up the payload nodes */
538    for (int i = 0; i < payload_node_count; i++)
539       ra_set_node_reg(g, first_payload_node + i, i);
540 
541    if (grf127_send_hack_node >= 0)
542       ra_set_node_reg(g, grf127_send_hack_node, 127);
543 
544    /* Specify the classes of each virtual register. */
545    for (unsigned i = 0; i < fs->alloc.count; i++) {
546       unsigned size = DIV_ROUND_UP(fs->alloc.sizes[i], reg_unit(devinfo));
547 
548       assert(size <= ARRAY_SIZE(compiler->fs_reg_set.classes) &&
549              "Register allocation relies on split_virtual_grfs()");
550 
551       ra_set_node_class(g, first_vgrf_node + i,
552                         compiler->fs_reg_set.classes[size - 1]);
553    }
554 
555    /* Add interference based on the live range of the register */
556    for (unsigned i = 0; i < fs->alloc.count; i++) {
557       setup_live_interference(first_vgrf_node + i,
558                               live.vgrf_start[i],
559                               live.vgrf_end[i]);
560    }
561 
562    /* Add interference based on the instructions in which a register is used.
563     */
564    foreach_block_and_inst(block, fs_inst, inst, fs->cfg)
565       setup_inst_interference(inst);
566 }
567 
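/* Build a single scalar register holding spill_offset, used by the
 * transposed (block) LSC unspill path where one address covers the whole
 * block.
 */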
568 brw_reg
569 fs_reg_alloc::build_single_offset(const fs_builder &bld, uint32_t spill_offset, int ip)
570 {
571    brw_reg offset = retype(alloc_spill_reg(1, ip), BRW_TYPE_UD);
572    fs_inst *inst = bld.MOV(offset, brw_imm_ud(spill_offset));
573    _mesa_set_add(spill_insts, inst);
574    return offset;
575 }
576 
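/* Build a per-lane byte offset into scratch for LSC scatter/gather messages:
 * lane_index * 4 + spill_offset.
 */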
577 brw_reg
578 fs_reg_alloc::build_lane_offsets(const fs_builder &bld, uint32_t spill_offset, int ip)
579 {
580    /* LSC messages are limited to SIMD16 */
581    assert(bld.dispatch_width() <= 16);
582 
583    const fs_builder ubld = bld.exec_all();
584    const unsigned reg_count = ubld.dispatch_width() / 8;
585 
586    brw_reg offset = retype(alloc_spill_reg(reg_count, ip), BRW_TYPE_UD);
587    fs_inst *inst;
588 
589    /* Build an offset per lane in SIMD8 */
590    inst = ubld.group(8, 0).MOV(retype(offset, BRW_TYPE_UW),
591                                brw_imm_uv(0x76543210));
592    _mesa_set_add(spill_insts, inst);
593    inst = ubld.group(8, 0).MOV(offset, retype(offset, BRW_TYPE_UW));
594    _mesa_set_add(spill_insts, inst);
595 
596    /* Build offsets in the upper 8 lanes of SIMD16 */
597    if (ubld.dispatch_width() > 8) {
598       inst = ubld.group(8, 0).ADD(
599          byte_offset(offset, REG_SIZE),
600          byte_offset(offset, 0),
601          brw_imm_ud(8));
602       _mesa_set_add(spill_insts, inst);
603    }
604 
605    /* Make the offset a dword */
606    inst = ubld.SHL(offset, offset, brw_imm_ud(2));
607    _mesa_set_add(spill_insts, inst);
608 
609    /* Add the base offset */
610    inst = ubld.ADD(offset, offset, brw_imm_ud(spill_offset));
611    _mesa_set_add(spill_insts, inst);
612 
613    return offset;
614 }
615 
616 /**
617  * Generate a scratch header for pre-LSC platforms.
618  */
619 brw_reg
620 fs_reg_alloc::build_legacy_scratch_header(const fs_builder &bld,
621                                           uint32_t spill_offset, int ip)
622 {
623    const fs_builder ubld8 = bld.exec_all().group(8, 0);
624    const fs_builder ubld1 = bld.exec_all().group(1, 0);
625 
626    /* Allocate a spill header and make it interfere with g0 */
627    brw_reg header = retype(alloc_spill_reg(1, ip), BRW_TYPE_UD);
628    ra_add_node_interference(g, first_vgrf_node + header.nr, first_payload_node);
629 
630    fs_inst *inst =
631       ubld8.emit(SHADER_OPCODE_SCRATCH_HEADER, header, brw_ud8_grf(0, 0));
632    _mesa_set_add(spill_insts, inst);
633 
634    /* Write the scratch offset */
635    assert(spill_offset % 16 == 0);
636    inst = ubld1.MOV(component(header, 2), brw_imm_ud(spill_offset / 16));
637    _mesa_set_add(spill_insts, inst);
638 
639    return header;
640 }
641 
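/* Emit fill (scratch read) instructions loading `count` registers starting at
 * spill_offset into dst.  Uses LSC UGM messages on verx10 >= 125 and legacy
 * OWord block reads with a scratch header otherwise.
 */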
642 void
643 fs_reg_alloc::emit_unspill(const fs_builder &bld,
644                            struct shader_stats *stats,
645                            brw_reg dst,
646                            uint32_t spill_offset, unsigned count, int ip)
647 {
648    const intel_device_info *devinfo = bld.shader->devinfo;
649    const unsigned reg_size = dst.component_size(bld.dispatch_width()) /
650                              REG_SIZE;
651 
652    for (unsigned i = 0; i < DIV_ROUND_UP(count, reg_size); i++) {
653       ++stats->fill_count;
654 
655       fs_inst *unspill_inst;
656       if (devinfo->verx10 >= 125) {
657          /* LSC is limited to SIMD16 load/store but we can load more using
658           * transpose messages.
659           */
660          const bool use_transpose = bld.dispatch_width() > 16;
661          const fs_builder ubld = use_transpose ? bld.exec_all().group(1, 0) : bld;
662          brw_reg offset;
663          if (use_transpose) {
664             offset = build_single_offset(ubld, spill_offset, ip);
665          } else {
666             offset = build_lane_offsets(ubld, spill_offset, ip);
667          }
668          /* We leave the extended descriptor empty and flag the instruction to
669           * ask the generator to insert the extended descriptor in the address
670           * register. That way we don't need to burn an additional register
671           * for register allocation spill/fill.
672           */
673          brw_reg srcs[] = {
674             brw_imm_ud(0), /* desc */
675             brw_imm_ud(0), /* ex_desc */
676             offset,        /* payload */
677             brw_reg(),      /* payload2 */
678          };
679 
680          unspill_inst = ubld.emit(SHADER_OPCODE_SEND, dst,
681                                   srcs, ARRAY_SIZE(srcs));
682          unspill_inst->sfid = GFX12_SFID_UGM;
683          unspill_inst->desc = lsc_msg_desc(devinfo, LSC_OP_LOAD,
684                                            LSC_ADDR_SURFTYPE_SS,
685                                            LSC_ADDR_SIZE_A32,
686                                            LSC_DATA_SIZE_D32,
687                                            use_transpose ? reg_size * 8 : 1 /* num_channels */,
688                                            use_transpose,
689                                            LSC_CACHE(devinfo, LOAD, L1STATE_L3MOCS));
690          unspill_inst->header_size = 0;
691          unspill_inst->mlen = lsc_msg_addr_len(devinfo, LSC_ADDR_SIZE_A32,
692                                                unspill_inst->exec_size);
693          unspill_inst->ex_mlen = 0;
694          unspill_inst->size_written =
695             lsc_msg_dest_len(devinfo, LSC_DATA_SIZE_D32, bld.dispatch_width()) * REG_SIZE;
696          unspill_inst->send_has_side_effects = false;
697          unspill_inst->send_is_volatile = true;
698          unspill_inst->send_ex_desc_scratch = true;
699       } else {
700          brw_reg header = build_legacy_scratch_header(bld, spill_offset, ip);
701 
702          const unsigned bti = GFX8_BTI_STATELESS_NON_COHERENT;
703          const brw_reg ex_desc = brw_imm_ud(0);
704 
705          brw_reg srcs[] = { brw_imm_ud(0), ex_desc, header };
706          unspill_inst = bld.emit(SHADER_OPCODE_SEND, dst,
707                                  srcs, ARRAY_SIZE(srcs));
708          unspill_inst->mlen = 1;
709          unspill_inst->header_size = 1;
710          unspill_inst->size_written = reg_size * REG_SIZE;
711          unspill_inst->send_has_side_effects = false;
712          unspill_inst->send_is_volatile = true;
713          unspill_inst->sfid = GFX7_SFID_DATAPORT_DATA_CACHE;
714          unspill_inst->desc =
715             brw_dp_desc(devinfo, bti,
716                         BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
717                         BRW_DATAPORT_OWORD_BLOCK_DWORDS(reg_size * 8));
718       }
719       _mesa_set_add(spill_insts, unspill_inst);
720       assert(unspill_inst->force_writemask_all || count % reg_size == 0);
721 
722       dst.offset += reg_size * REG_SIZE;
723       spill_offset += reg_size * REG_SIZE;
724    }
725 }
726 
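/* Emit spill (scratch write) instructions storing `count` registers of src to
 * scratch at spill_offset; the counterpart of emit_unspill() above.
 */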
727 void
728 fs_reg_alloc::emit_spill(const fs_builder &bld,
729                          struct shader_stats *stats,
730                          brw_reg src,
731                          uint32_t spill_offset, unsigned count, int ip)
732 {
733    const intel_device_info *devinfo = bld.shader->devinfo;
734    const unsigned reg_size = src.component_size(bld.dispatch_width()) /
735                              REG_SIZE;
736 
737    for (unsigned i = 0; i < DIV_ROUND_UP(count, reg_size); i++) {
738       ++stats->spill_count;
739 
740       fs_inst *spill_inst;
741       if (devinfo->verx10 >= 125) {
742          brw_reg offset = build_lane_offsets(bld, spill_offset, ip);
743          /* We leave the extended descriptor empty and flag the instruction to
744           * relocate the extended descriptor. That way the surface offset is
745           * directly put into the instruction and we don't need to use a
746           * register to hold it.
747           */
748          brw_reg srcs[] = {
749             brw_imm_ud(0),        /* desc */
750             brw_imm_ud(0),        /* ex_desc */
751             offset,               /* payload */
752             src,                  /* payload2 */
753          };
754          spill_inst = bld.emit(SHADER_OPCODE_SEND, bld.null_reg_f(),
755                                srcs, ARRAY_SIZE(srcs));
756          spill_inst->sfid = GFX12_SFID_UGM;
757          spill_inst->desc = lsc_msg_desc(devinfo, LSC_OP_STORE,
758                                          LSC_ADDR_SURFTYPE_SS,
759                                          LSC_ADDR_SIZE_A32,
760                                          LSC_DATA_SIZE_D32,
761                                          1 /* num_channels */,
762                                          false /* transpose */,
763                                          LSC_CACHE(devinfo, LOAD, L1STATE_L3MOCS));
764          spill_inst->header_size = 0;
765          spill_inst->mlen = lsc_msg_addr_len(devinfo, LSC_ADDR_SIZE_A32,
766                                              bld.dispatch_width());
767          spill_inst->ex_mlen = reg_size;
768          spill_inst->size_written = 0;
769          spill_inst->send_has_side_effects = true;
770          spill_inst->send_is_volatile = false;
771          spill_inst->send_ex_desc_scratch = true;
772       } else {
773          brw_reg header = build_legacy_scratch_header(bld, spill_offset, ip);
774 
775          const unsigned bti = GFX8_BTI_STATELESS_NON_COHERENT;
776          const brw_reg ex_desc = brw_imm_ud(0);
777 
778          brw_reg srcs[] = { brw_imm_ud(0), ex_desc, header, src };
779          spill_inst = bld.emit(SHADER_OPCODE_SEND, bld.null_reg_f(),
780                                srcs, ARRAY_SIZE(srcs));
781          spill_inst->mlen = 1;
782          spill_inst->ex_mlen = reg_size;
783          spill_inst->size_written = 0;
784          spill_inst->header_size = 1;
785          spill_inst->send_has_side_effects = true;
786          spill_inst->send_is_volatile = false;
787          spill_inst->sfid = GFX7_SFID_DATAPORT_DATA_CACHE;
788          spill_inst->desc =
789             brw_dp_desc(devinfo, bti,
790                         GFX6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE,
791                         BRW_DATAPORT_OWORD_BLOCK_DWORDS(reg_size * 8));
792       }
793       _mesa_set_add(spill_insts, spill_inst);
794       assert(spill_inst->force_writemask_all || count % reg_size == 0);
795 
796       src.offset += reg_size * REG_SIZE;
797       spill_offset += reg_size * REG_SIZE;
798    }
799 }
800 
801 void
802 fs_reg_alloc::set_spill_costs()
803 {
804    float block_scale = 1.0;
805    float spill_costs[fs->alloc.count];
806    bool no_spill[fs->alloc.count];
807 
808    for (unsigned i = 0; i < fs->alloc.count; i++) {
809       spill_costs[i] = 0.0;
810       no_spill[i] = false;
811    }
812 
813    /* Calculate costs for spilling nodes.  Call it a cost of 1 per
814     * spill/unspill we'll have to do, and guess that the insides of
815     * loops run 10 times.
816     */
817    foreach_block_and_inst(block, fs_inst, inst, fs->cfg) {
818       for (unsigned int i = 0; i < inst->sources; i++) {
819 	 if (inst->src[i].file == VGRF)
820             spill_costs[inst->src[i].nr] += regs_read(inst, i) * block_scale;
821       }
822 
823       if (inst->dst.file == VGRF)
824          spill_costs[inst->dst.nr] += regs_written(inst) * block_scale;
825 
826       /* Don't spill anything we generated while spilling */
827       if (_mesa_set_search(spill_insts, inst)) {
828          for (unsigned int i = 0; i < inst->sources; i++) {
829 	    if (inst->src[i].file == VGRF)
830                no_spill[inst->src[i].nr] = true;
831          }
832 	 if (inst->dst.file == VGRF)
833             no_spill[inst->dst.nr] = true;
834       }
835 
836       switch (inst->opcode) {
837 
838       case BRW_OPCODE_DO:
839 	 block_scale *= 10;
840 	 break;
841 
842       case BRW_OPCODE_WHILE:
843 	 block_scale /= 10;
844 	 break;
845 
846       case BRW_OPCODE_IF:
847          block_scale *= 0.5;
848          break;
849 
850       case BRW_OPCODE_ENDIF:
851          block_scale /= 0.5;
852          break;
853 
854       default:
855 	 break;
856       }
857    }
858 
859    for (unsigned i = 0; i < fs->alloc.count; i++) {
860       /* Do the no_spill check first.  Registers that are used as spill
861        * temporaries may have been allocated after we calculated liveness so
862        * we shouldn't look their liveness up.  Fortunately, they're always
863        * used in SCRATCH_READ/WRITE instructions so they'll always be flagged
864        * no_spill.
865        */
866       if (no_spill[i])
867          continue;
868 
869       int live_length = live.vgrf_end[i] - live.vgrf_start[i];
870       if (live_length <= 0)
871          continue;
872 
873       /* Divide the cost (in number of spills/fills) by the log of the length
874        * of the live range of the register.  This will encourage spill logic
875        * to spill long-living things before spilling short-lived things where
876        * spilling is less likely to actually do us any good.  We use the log
877        * of the length because it will fall off very quickly and not cause us
878        * to spill medium length registers with more uses.
879        */
880       float adjusted_cost = spill_costs[i] / logf(live_length);
881       ra_set_node_spill_cost(g, first_vgrf_node + i, adjusted_cost);
882    }
883 
884    have_spill_costs = true;
885 }
886 
887 int
888 fs_reg_alloc::choose_spill_reg()
889 {
890    if (!have_spill_costs)
891       set_spill_costs();
892 
893    int node = ra_get_best_spill_node(g);
894    if (node < 0)
895       return -1;
896 
897    assert(node >= first_vgrf_node);
898    return node - first_vgrf_node;
899 }
900 
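/* Allocate a new VGRF to use as a spill/fill temporary and add a matching RA
 * node that is treated as live only around the instruction at `ip`, plus
 * interference with any other spill temporary used at the same ip.
 */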
901 brw_reg
902 fs_reg_alloc::alloc_spill_reg(unsigned size, int ip)
903 {
904    int vgrf = fs->alloc.allocate(ALIGN(size, reg_unit(devinfo)));
905    int class_idx = DIV_ROUND_UP(size, reg_unit(devinfo)) - 1;
906    int n = ra_add_node(g, compiler->fs_reg_set.classes[class_idx]);
907    assert(n == first_vgrf_node + vgrf);
908    assert(n == first_spill_node + spill_node_count);
909 
910    setup_live_interference(n, ip - 1, ip + 1);
911 
912    /* Add interference between this spill node and any other spill nodes for
913     * the same instruction.
914     */
915    for (int s = 0; s < spill_node_count; s++) {
916       if (spill_vgrf_ip[s] == ip)
917          ra_add_node_interference(g, n, first_spill_node + s);
918    }
919 
920    /* Add this spill node to the list for next time */
921    if (spill_node_count >= spill_vgrf_ip_alloc) {
922       if (spill_vgrf_ip_alloc == 0)
923          spill_vgrf_ip_alloc = 16;
924       else
925          spill_vgrf_ip_alloc *= 2;
926       spill_vgrf_ip = reralloc(mem_ctx, spill_vgrf_ip, int,
927                                spill_vgrf_ip_alloc);
928    }
929    spill_vgrf_ip[spill_node_count++] = ip;
930 
931    return brw_vgrf(vgrf, BRW_TYPE_F);
932 }
933 
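/* Rewrite every read/write of spill_reg into scratch fills/spills around the
 * affected instructions, using freshly allocated temporaries, and update the
 * interference graph incrementally rather than rebuilding it.
 */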
934 void
935 fs_reg_alloc::spill_reg(unsigned spill_reg)
936 {
937    int size = fs->alloc.sizes[spill_reg];
938    unsigned int spill_offset = fs->last_scratch;
939    assert(ALIGN(spill_offset, 16) == spill_offset); /* oword read/write req. */
940 
941    fs->spilled_any_registers = true;
942 
943    fs->last_scratch += align(size * REG_SIZE, REG_SIZE * reg_unit(devinfo));
944 
945    /* We're about to replace all uses of this register.  It no longer
946     * conflicts with anything so we can get rid of its interference.
947     */
948    ra_set_node_spill_cost(g, first_vgrf_node + spill_reg, 0);
949    ra_reset_node_interference(g, first_vgrf_node + spill_reg);
950 
951    /* Generate spill/unspill instructions for the objects being
952     * spilled.  Right now, we spill or unspill the whole thing to a
953     * virtual grf of the same size.  For most instructions, though, we
954     * could just spill/unspill the GRF being accessed.
955     */
956    int ip = 0;
957    foreach_block_and_inst (block, fs_inst, inst, fs->cfg) {
958       const fs_builder ibld = fs_builder(fs, block, inst);
959       exec_node *before = inst->prev;
960       exec_node *after = inst->next;
961 
962       for (unsigned int i = 0; i < inst->sources; i++) {
963 	 if (inst->src[i].file == VGRF &&
964              inst->src[i].nr == spill_reg) {
965             int count = regs_read(inst, i);
966             int subset_spill_offset = spill_offset +
967                ROUND_DOWN_TO(inst->src[i].offset, REG_SIZE * reg_unit(devinfo));
968             brw_reg unspill_dst = alloc_spill_reg(count, ip);
969 
970             inst->src[i].nr = unspill_dst.nr;
971             inst->src[i].offset %= REG_SIZE;
972 
973             /* We read the largest power-of-two divisor of the register count
974              * (because only POT scratch read blocks are allowed by the
975              * hardware) up to the maximum supported block size.
976              */
977             const unsigned width =
978                MIN2(32, 1u << (ffs(MAX2(1, count) * 8) - 1));
979 
980             /* Set exec_all() on unspill messages under the (rather
981              * pessimistic) assumption that there is no one-to-one
982              * correspondence between channels of the spilled variable in
983              * scratch space and the scratch read message, which operates on
984              * 32 bit channels.  It shouldn't hurt in any case because the
985              * unspill destination is a block-local temporary.
986              */
987             emit_unspill(ibld.exec_all().group(width, 0), &fs->shader_stats,
988                          unspill_dst, subset_spill_offset, count, ip);
989 	 }
990       }
991 
992       if (inst->dst.file == VGRF &&
993           inst->dst.nr == spill_reg &&
994           inst->opcode != SHADER_OPCODE_UNDEF) {
995          int subset_spill_offset = spill_offset +
996             ROUND_DOWN_TO(inst->dst.offset, reg_unit(devinfo) * REG_SIZE);
997          brw_reg spill_src = alloc_spill_reg(regs_written(inst), ip);
998 
999          inst->dst.nr = spill_src.nr;
1000          inst->dst.offset %= REG_SIZE;
1001 
1002          /* If we're immediately spilling the register, we should not use
1003           * destination dependency hints.  Doing so will cause the GPU to
1004           * try to read and write the register at the same time and may
1005           * hang the GPU.
1006           */
1007          inst->no_dd_clear = false;
1008          inst->no_dd_check = false;
1009 
1010          /* Calculate the execution width of the scratch messages (which work
1011           * in terms of 32 bit components so we have a fixed number of eight
1012           * channels per spilled register).  We attempt to write one
1013           * exec_size-wide component of the variable at a time without
1014           * exceeding the maximum number of (fake) MRF registers reserved for
1015           * spills.
1016           */
1017          const unsigned width = 8 * reg_unit(devinfo) *
1018             DIV_ROUND_UP(MIN2(inst->dst.component_size(inst->exec_size),
1019                               spill_max_size(fs) * REG_SIZE),
1020                          reg_unit(devinfo) * REG_SIZE);
1021 
1022          /* Spills should only write data initialized by the instruction for
1023           * whichever channels are enabled in the execution mask.  If that's
1024           * not possible we'll have to emit a matching unspill before the
1025           * instruction and set force_writemask_all on the spill.
1026           */
1027          const bool per_channel =
1028             inst->dst.is_contiguous() &&
1029             brw_type_size_bytes(inst->dst.type) == 4 &&
1030             inst->exec_size == width;
1031 
1032          /* Builder used to emit the scratch messages. */
1033          const fs_builder ubld = ibld.exec_all(!per_channel).group(width, 0);
1034 
1035 	 /* If our write is going to affect just part of the
1036           * regs_written(inst), then we need to unspill the destination since
1037           * we write back out all of the regs_written().  If the original
1038           * instruction had force_writemask_all set and is not a partial
1039           * write, there should be no need for the unspill since the
1040           * instruction will be overwriting the whole destination in any case.
1041 	  */
1042          if (inst->is_partial_write() ||
1043              (!inst->force_writemask_all && !per_channel))
1044             emit_unspill(ubld, &fs->shader_stats, spill_src,
1045                          subset_spill_offset, regs_written(inst), ip);
1046 
1047          emit_spill(ubld.at(block, inst->next), &fs->shader_stats, spill_src,
1048                     subset_spill_offset, regs_written(inst), ip);
1049       }
1050 
1051       for (fs_inst *inst = (fs_inst *)before->next;
1052            inst != after; inst = (fs_inst *)inst->next)
1053          setup_inst_interference(inst);
1054 
1055       /* We don't advance the ip for scratch read/write instructions
1056        * because we consider them to have the same ip as the instruction we're
1057        * spilling around for the purposes of interference.  Also, we're
1058        * inserting spill instructions without re-running liveness analysis
1059        * and we don't want to mess up our IPs.
1060        */
1061       if (!_mesa_set_search(spill_insts, inst))
1062          ip++;
1063    }
1064 
1065    assert(ip == live_instr_count);
1066 }
1067 
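/* Top-level allocation loop: build the interference graph, then alternate
 * ra_allocate() with spilling (one or more registers per round, depending on
 * compiler->spilling_rate) until allocation succeeds or nothing is left to
 * spill.
 */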
1068 bool
1069 fs_reg_alloc::assign_regs(bool allow_spilling, bool spill_all)
1070 {
1071    build_interference_graph(allow_spilling);
1072 
1073    unsigned spilled = 0;
1074    while (1) {
1075       /* Debug of register spilling: Go spill everything. */
1076       if (unlikely(spill_all)) {
1077          int reg = choose_spill_reg();
1078          if (reg != -1) {
1079             spill_reg(reg);
1080             continue;
1081          }
1082       }
1083 
1084       if (ra_allocate(g))
1085          break;
1086 
1087       if (!allow_spilling)
1088          return false;
1089 
1090       /* Failed to allocate registers.  Spill some regs, and the caller will
1091        * loop back into here to try again.
1092        */
1093       unsigned nr_spills = 1;
1094       if (compiler->spilling_rate)
1095          nr_spills = MAX2(1, spilled / compiler->spilling_rate);
1096 
1097       for (unsigned j = 0; j < nr_spills; j++) {
1098          int reg = choose_spill_reg();
1099          if (reg == -1) {
1100             if (j == 0)
1101                return false; /* Nothing to spill */
1102             break;
1103          }
1104 
1105          spill_reg(reg);
1106          spilled++;
1107       }
1108    }
1109 
1110    if (spilled)
1111       fs->invalidate_analysis(DEPENDENCY_INSTRUCTIONS | DEPENDENCY_VARIABLES);
1112 
1113    /* Get the chosen virtual registers for each node, and map virtual
1114     * regs in the register classes back down to real hardware reg
1115     * numbers.
1116     */
1117    unsigned hw_reg_mapping[fs->alloc.count];
1118    fs->grf_used = fs->first_non_payload_grf;
1119    for (unsigned i = 0; i < fs->alloc.count; i++) {
1120       int reg = ra_get_node_reg(g, first_vgrf_node + i);
1121 
1122       hw_reg_mapping[i] = reg;
1123       fs->grf_used = MAX2(fs->grf_used,
1124 			  hw_reg_mapping[i] + DIV_ROUND_UP(fs->alloc.sizes[i],
1125                                                            reg_unit(devinfo)));
1126    }
1127 
1128    foreach_block_and_inst(block, fs_inst, inst, fs->cfg) {
1129       assign_reg(devinfo, hw_reg_mapping, &inst->dst);
1130       for (int i = 0; i < inst->sources; i++) {
1131          assign_reg(devinfo, hw_reg_mapping, &inst->src[i]);
1132       }
1133    }
1134 
1135    fs->alloc.count = fs->grf_used;
1136 
1137    return true;
1138 }
1139 
1140 bool
1141 brw_assign_regs(fs_visitor &s, bool allow_spilling, bool spill_all)
1142 {
1143    fs_reg_alloc alloc(&s);
1144    bool success = alloc.assign_regs(allow_spilling, spill_all);
1145    if (!success && allow_spilling) {
1146       s.fail("no register to spill:\n");
1147       brw_print_instructions(s, NULL);
1148    }
1149    return success;
1150 }
1151