/*
 * Copyright © 2023 Valve Corporation
 * SPDX-License-Identifier: MIT
 */

/* The pass uses information on which branches are divergent in order to
 * determine which blocks are "reconvergence points" where parked threads may
 * become reactivated, as well as to add "physical" edges where the machine may
 * fall through to the next reconvergence point. Reconvergence points need a
 * (jp) added in the assembly, and physical edges are needed to model shared
 * register liveness correctly. Reconvergence happens in the following two
 * scenarios:
 *
 * 1. When there is a divergent branch, the later of the two block destinations
 *    becomes a reconvergence point.
 * 2. When a forward edge crosses over a reconvergence point that may be
 *    outstanding at the start of the edge, we need to park the threads that
 *    take the edge and resume execution at the reconvergence point. This means
 *    that there is a physical edge from the start of the edge to the
 *    reconvergence point, and the destination of the edge becomes a new
 *    reconvergence point.
 *
 * For example, consider this simple if-else:
 *
 *    bb0:
 *    ...
 *    br p0.x, #bb1, #bb2
 *    bb1:
 *    ...
 *    jump bb3
 *    bb2:
 *    ...
 *    jump bb3
 *    bb3:
 *    ...
 *
 * The divergent branch at the end of bb0 makes bb2 a reconvergence point
 * following (1), which starts being outstanding after the branch at the end of
 * bb0. The jump to bb3 at the end of bb1 goes over bb2 while it is outstanding,
 * so there is a physical edge from bb1 to bb2 and bb3 is a reconvergence point
 * following (2).
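 *
 * The end result for this example: bb2 and bb3 both become reconvergence
 * points (and so get a (jp)), and bb2 gains a physical predecessor edge from
 * bb1 on top of the logical edge from bb0.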
 *
 * Note that (2) can apply recursively. To handle this efficiently we build an
 * interval tree of forward edges that cross other blocks, and whenever a block
 * becomes an RP we iterate through the edges jumping across it using the tree.
 * We also need to keep track of the range where each RP may be
 * "outstanding." An RP becomes outstanding after a branch to it parks its
 * threads there. This range may grow as we discover more branches that may
 * park their threads there.
 *
 * Finally, we need to compute the branchstack value, which is the maximum
 * number of outstanding reconvergence points. For the if-else, the branchstack
 * is 2, because after the jump at the end of bb1 both reconvergence points are
 * outstanding (although the first is removed immediately afterwards). Because
 * we already computed the range where each RP is outstanding, this part is
 * relatively straightforward.
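 *
 * Concretely, for the example above: bb2 may be outstanding in the open range
 * (bb0, bb2) and bb3 in (bb1, bb3), so scanning the blocks in order, the
 * number of outstanding RPs peaks at 2 on exiting bb1.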
 */

#include <limits.h>

#include "ir3_shader.h"

#include "util/rb_tree.h"
#include "util/u_worklist.h"
#include "util/ralloc.h"

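/* A logical CFG edge, kept in an interval tree keyed by the range of block
 * indices it spans, so that all edges jumping over a given block can be found
 * efficiently.
 */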
struct logical_edge {
   struct uinterval_node node;
   struct ir3_block *start_block;
   struct ir3_block *end_block;
};

struct block_data {
   /* For a reconvergence point, the index of the first block where, upon
    * exiting, the RP may be outstanding. Normally this is a predecessor but may
    * be a loop header for loops.
    */
   unsigned first_divergent_pred;

   /* The last processed first_divergent_pred. */
   unsigned first_processed_divergent_pred;

   /* The number of blocks that have this block as their first_divergent_pred. */
   unsigned divergence_count;
};

void
ir3_calc_reconvergence(struct ir3_shader_variant *so)
{
   void *mem_ctx = ralloc_context(NULL);

   /* It's important that the index we use corresponds to the final order blocks
    * are emitted in!
    */
   unsigned index = 0;
   foreach_block (block, &so->ir->block_list) {
      block->index = index++;
   }

   /* Set up the tree of edges */
   unsigned edge_count = 0;
   foreach_block (block, &so->ir->block_list) {
      if (block->successors[0])
         edge_count++;
      if (block->successors[1])
         edge_count++;

      block->physical_predecessors_count = 0;
      block->physical_successors_count = 0;
      block->reconvergence_point = false;
   }

   struct rb_tree forward_edges, backward_edges;
   rb_tree_init(&forward_edges);
   rb_tree_init(&backward_edges);

   unsigned edge = 0;
   struct logical_edge *edges =
      ralloc_array(mem_ctx, struct logical_edge, edge_count);
   struct block_data *blocks =
      ralloc_array(mem_ctx, struct block_data, index);
   foreach_block (block, &so->ir->block_list) {
      blocks[block->index].divergence_count = 0;
      blocks[block->index].first_divergent_pred = UINT_MAX;
      blocks[block->index].first_processed_divergent_pred = UINT_MAX;
      for (unsigned i = 0; i < ARRAY_SIZE(block->successors); i++) {
         if (block->successors[i]) {
            ir3_block_link_physical(block, block->successors[i]);

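            /* A forward edge that skips over at least one block: record the
             * range of blocks it crosses so that an RP inside that range can
             * find it later.
             */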
            if (block->successors[i]->index > block->index + 1) {
               edges[edge] = (struct logical_edge) {
                  .node = {
                     .interval = {
                        block->index + 1,
                        block->successors[i]->index - 1
                     },
                  },
                  .start_block = block,
                  .end_block = block->successors[i],
               };

               uinterval_tree_insert(&forward_edges, &edges[edge++].node);
            } else if (block->successors[i]->index <= block->index) {
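               /* A backward (loop) edge: kept in a separate tree so that we
                * can later find back edges originating inside an RP's
                * divergence range.
                */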
               edges[edge] = (struct logical_edge) {
                  .node = {
                     .interval = {
                        block->successors[i]->index - 1,
                        block->index + 1
                     },
                  },
                  .start_block = block->successors[i],
                  .end_block = block,
               };

               uinterval_tree_insert(&backward_edges, &edges[edge++].node);
            }
         }
      }
   }

   assert(edge <= edge_count);

   u_worklist worklist;
   u_worklist_init(&worklist, index, mem_ctx);

   /* First, find and mark divergent branches. The later destination will be the
    * reconvergence point.
    */
   foreach_block (block, &so->ir->block_list) {
      struct ir3_instruction *terminator = ir3_block_get_terminator(block);
      if (!terminator)
         continue;
      if (terminator->opc == OPC_PREDT || terminator->opc == OPC_PREDF)
         continue;
      if (block->successors[0] && block->successors[1] &&
          block->divergent_condition) {
         struct ir3_block *reconv_points[2];
         unsigned num_reconv_points;
         struct ir3_instruction *prev_instr = NULL;

         if (!list_is_singular(&block->instr_list)) {
            prev_instr =
               list_entry(terminator->node.prev, struct ir3_instruction, node);
         }

         if (prev_instr && is_terminator(prev_instr)) {
            /* There are two terminating branches so both successors are
             * reconvergence points (i.e., there is no fall through into the
             * next block). This can only happen after ir3_legalize when we fail
             * to eliminate a non-invertible branch. For example:
             * getone #bb0
             * jump #bb1
             * bb0: (jp)...
             * bb1: (jp)...
             */
            reconv_points[0] = block->successors[0];
            reconv_points[1] = block->successors[1];
            num_reconv_points = 2;
         } else {
            unsigned idx =
               block->successors[0]->index > block->successors[1]->index ? 0
                                                                         : 1;
            reconv_points[0] = block->successors[idx];
            reconv_points[1] = NULL;
            num_reconv_points = 1;
         }

         for (unsigned i = 0; i < num_reconv_points; i++) {
            struct ir3_block *reconv_point = reconv_points[i];
            reconv_point->reconvergence_point = true;

            struct block_data *reconv_point_data = &blocks[reconv_point->index];
            if (reconv_point_data->first_divergent_pred > block->index) {
               reconv_point_data->first_divergent_pred = block->index;
            }

            u_worklist_push_tail(&worklist, reconv_point, index);
         }
      }
   }

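   /* Propagate rule (2) to a fixed point: turning a block into an RP may force
    * edges that cross it to create new RPs, which are pushed back onto the
    * worklist.
    */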
   while (!u_worklist_is_empty(&worklist)) {
      struct ir3_block *block =
         u_worklist_pop_head(&worklist, struct ir3_block, index);
      assert(block->reconvergence_point);

      /* Backwards branches extend the range of divergence. For example, a
       * divergent break creates a reconvergence point after the loop that
       * stays outstanding throughout subsequent iterations, even at points
       * before the break. This takes that into account.
       *
       * More precisely, a backwards edge that originates between the block and
       * its first_divergent_pred (i.e. in the divergence range) extends the
       * divergence range to the beginning of its destination if it is taken, or
       * alternatively to the end of the block before its destination.
       */
      struct uinterval interval2 = {
         blocks[block->index].first_divergent_pred,
         blocks[block->index].first_divergent_pred
      };
      uinterval_tree_foreach (struct logical_edge, back_edge, interval2, &backward_edges,
                              node) {
         if (back_edge->end_block->index < block->index) {
            if (blocks[block->index].first_divergent_pred >
                back_edge->start_block->index - 1) {
               blocks[block->index].first_divergent_pred =
                  back_edge->start_block->index - 1;
            }
         }
      }

      /* Iterate over all edges stepping over the block. */
      struct uinterval interval = { block->index, block->index };
      struct logical_edge *prev = NULL;
      uinterval_tree_foreach (struct logical_edge, edge, interval, &forward_edges,
                              node) {
         /* If "block" definitely isn't outstanding when the branch
          * corresponding to "edge" is taken, then we don't need to park
          * "edge->end_block" and we can ignore this.
          *
          * TODO: add uinterval_tree_foreach_from() and use that instead.
          */
         if (edge->start_block->index <= blocks[block->index].first_divergent_pred)
            continue;

         /* If we've already processed this edge + RP pair, don't process it
          * again. Because edges are ordered by start point, we must have
          * processed every edge after this too.
          */
         if (edge->start_block->index >
             blocks[block->index].first_processed_divergent_pred)
            break;

         edge->end_block->reconvergence_point = true;
         if (blocks[edge->end_block->index].first_divergent_pred >
             edge->start_block->index) {
            blocks[edge->end_block->index].first_divergent_pred =
               edge->start_block->index;
            u_worklist_push_tail(&worklist, edge->end_block, index);
         }

         if (!prev || prev->start_block != edge->start_block) {
            /* Add the physical edge from the edge's start block to this RP
             * only once. Edges are sorted by start point, so comparing with
             * the previous edge is enough to skip duplicates when multiple
             * edges share a start point. We also skip logical successors of
             * the edge's start block, since physical edges for those were
             * already added initially.
             */
            if (block != edge->start_block->successors[0] &&
                block != edge->start_block->successors[1]) {
               for (unsigned i = 0; i < block->physical_predecessors_count; i++)
                  assert(block->physical_predecessors[i] != edge->start_block);
               ir3_block_link_physical(edge->start_block, block);
            }
         }
         prev = edge;
      }

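      /* Remember how far we've scanned so that, if this block is pushed onto
       * the worklist again, only newly discovered divergent preds are
       * reprocessed.
       */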
      blocks[block->index].first_processed_divergent_pred =
         blocks[block->index].first_divergent_pred;
   }

   /* For each reconvergence point p we have an open range
    * (p->first_divergent_pred, p) where p may be outstanding. We need to keep
    * track of the number of outstanding RPs and calculate the maximum.
    */
   foreach_block (block, &so->ir->block_list) {
      if (block->reconvergence_point) {
         blocks[blocks[block->index].first_divergent_pred].divergence_count++;
      }
   }

   unsigned rc_level = 0;
   so->branchstack = 0;
   foreach_block (block, &so->ir->block_list) {
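      /* An outstanding RP is consumed once execution reaches it. */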
      if (block->reconvergence_point)
         rc_level--;

      /* Account for lowerings that produce divergent control flow. */
      foreach_instr (instr, &block->instr_list) {
         switch (instr->opc) {
         case OPC_SCAN_MACRO:
            so->branchstack = MAX2(so->branchstack, rc_level + 2);
            break;
         case OPC_BALLOT_MACRO:
         case OPC_READ_COND_MACRO:
         case OPC_ELECT_MACRO:
         case OPC_READ_FIRST_MACRO:
            so->branchstack = MAX2(so->branchstack, rc_level + 1);
            break;
         default:
            break;
         }
      }

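      /* RPs whose divergence range opens at this block become outstanding once
       * we exit it.
       */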
      rc_level += blocks[block->index].divergence_count;

      so->branchstack = MAX2(so->branchstack, rc_level);
   }
   assert(rc_level == 0);

   ralloc_free(mem_ctx);
}