/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <[email protected]>
 *
 */

#ifndef BRW_CFG_H
#define BRW_CFG_H

struct bblock_t;

#ifdef __cplusplus

#include "brw_ir.h"
#include "brw_ir_analysis.h"
#include "brw_ir_fs.h"

struct bblock_t;
/**
 * CFG edge types.
 *
 * A logical edge represents a potential control flow path of the original
 * scalar program, while a physical edge represents a control flow path that
 * may not have existed in the original program but was introduced during
 * vectorization in order to implement divergent control flow of different
 * shader invocations within the same SIMD thread.
 *
 * All logical edges in the CFG are also physical edges, but not the other
 * way around -- i.e. the logical CFG is a subset of the physical one.
 */
enum bblock_link_kind {
   bblock_link_logical = 0,
   bblock_link_physical
};

struct bblock_link {
   DECLARE_RALLOC_CXX_OPERATORS(bblock_link)

   bblock_link(bblock_t *block, enum bblock_link_kind kind)
      : block(block), kind(kind)
   {
   }

   struct exec_node link;
   struct bblock_t *block;

   /* Type of this CFG edge.  Because bblock_link_logical also implies
    * bblock_link_physical, the proper way to test for membership of edge 'l'
    * in CFG kind 'k' is 'l.kind <= k'.
    */
   enum bblock_link_kind kind;
};
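
/* Illustrative use of the 'l.kind <= k' membership test described above: a
 * hypothetical helper that counts how many successor edges of a block (the
 * bblock_t::children list declared below) belong to the requested CFG kind,
 * using the foreach_list_typed() helper from Mesa's exec_list header:
 *
 *    static unsigned
 *    count_successors(bblock_t *block, enum bblock_link_kind kind)
 *    {
 *       unsigned n = 0;
 *       foreach_list_typed(bblock_link, child, link, &block->children) {
 *          if (child->kind <= kind)
 *             n++;
 *       }
 *       return n;
 *    }
 *
 * Passing bblock_link_logical counts only logical edges, while
 * bblock_link_physical counts every edge, since logical edges are also
 * physical.
 */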

struct fs_visitor;
struct cfg_t;

struct bblock_t {
   DECLARE_RALLOC_CXX_OPERATORS(bblock_t)

   explicit bblock_t(cfg_t *cfg);

   void add_successor(void *mem_ctx, bblock_t *successor,
                      enum bblock_link_kind kind);
   bool is_predecessor_of(const bblock_t *block,
                          enum bblock_link_kind kind) const;
   bool is_successor_of(const bblock_t *block,
                        enum bblock_link_kind kind) const;
   bool can_combine_with(const bblock_t *that) const;
   void combine_with(bblock_t *that);
   void dump(FILE *file = stderr) const;

   fs_inst *start();
   const fs_inst *start() const;
   fs_inst *end();
   const fs_inst *end() const;

   bblock_t *next();
   const bblock_t *next() const;
   bblock_t *prev();
   const bblock_t *prev() const;

   bool starts_with_control_flow() const;
   bool ends_with_control_flow() const;

   fs_inst *first_non_control_flow_inst();
   fs_inst *last_non_control_flow_inst();

private:
   /**
    * \sa unlink_parents, unlink_children
    */
   void unlink_list(exec_list *);

public:
   void unlink_parents()
   {
      unlink_list(&parents);
   }

   void unlink_children()
   {
      unlink_list(&children);
   }

   struct exec_node link;
   struct cfg_t *cfg;

   int start_ip;
   int end_ip;

   /**
    * Change in end_ip since the last time IPs of later blocks were updated.
    */
   int end_ip_delta;

   struct exec_list instructions;
   struct exec_list parents;
   struct exec_list children;
   int num;
};
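
/* Illustrative sketch of querying the structure above: a hypothetical helper
 * that checks whether a block has a logical CFG edge to the block that
 * immediately follows it in program order, using only members declared in
 * bblock_t:
 *
 *    static bool
 *    has_logical_edge_to_next(const bblock_t *block)
 *    {
 *       const bblock_t *next = block->next();
 *       return next != NULL &&
 *              block->is_predecessor_of(next, bblock_link_logical);
 *    }
 */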

static inline fs_inst *
bblock_start(struct bblock_t *block)
{
   return (fs_inst *)exec_list_get_head(&block->instructions);
}

static inline const fs_inst *
bblock_start_const(const struct bblock_t *block)
{
   return (const fs_inst *)exec_list_get_head_const(&block->instructions);
}

static inline fs_inst *
bblock_end(struct bblock_t *block)
{
   return (fs_inst *)exec_list_get_tail(&block->instructions);
}

static inline const fs_inst *
bblock_end_const(const struct bblock_t *block)
{
   return (const fs_inst *)exec_list_get_tail_const(&block->instructions);
}

static inline struct bblock_t *
bblock_next(struct bblock_t *block)
{
   if (exec_node_is_tail_sentinel(block->link.next))
      return NULL;

   return (struct bblock_t *)block->link.next;
}

static inline const struct bblock_t *
bblock_next_const(const struct bblock_t *block)
{
   if (exec_node_is_tail_sentinel(block->link.next))
      return NULL;

   return (const struct bblock_t *)block->link.next;
}

static inline struct bblock_t *
bblock_prev(struct bblock_t *block)
{
   if (exec_node_is_head_sentinel(block->link.prev))
      return NULL;

   return (struct bblock_t *)block->link.prev;
}

static inline const struct bblock_t *
bblock_prev_const(const struct bblock_t *block)
{
   if (exec_node_is_head_sentinel(block->link.prev))
      return NULL;

   return (const struct bblock_t *)block->link.prev;
}

static inline bool
bblock_starts_with_control_flow(const struct bblock_t *block)
{
   enum opcode op = bblock_start_const(block)->opcode;
   return op == BRW_OPCODE_DO || op == BRW_OPCODE_ENDIF;
}

static inline bool
bblock_ends_with_control_flow(const struct bblock_t *block)
{
   enum opcode op = bblock_end_const(block)->opcode;
   return op == BRW_OPCODE_IF ||
          op == BRW_OPCODE_ELSE ||
          op == BRW_OPCODE_WHILE ||
          op == BRW_OPCODE_BREAK ||
          op == BRW_OPCODE_CONTINUE;
}

static inline fs_inst *
bblock_first_non_control_flow_inst(struct bblock_t *block)
{
   fs_inst *inst = bblock_start(block);
   if (bblock_starts_with_control_flow(block))
#ifdef __cplusplus
      inst = (fs_inst *)inst->next;
#else
      inst = (fs_inst *)inst->link.next;
#endif
   return inst;
}

static inline fs_inst *
bblock_last_non_control_flow_inst(struct bblock_t *block)
{
   fs_inst *inst = bblock_end(block);
   if (bblock_ends_with_control_flow(block))
#ifdef __cplusplus
      inst = (fs_inst *)inst->prev;
#else
      inst = (fs_inst *)inst->link.prev;
#endif
   return inst;
}

inline fs_inst *
bblock_t::start()
{
   return bblock_start(this);
}

inline const fs_inst *
bblock_t::start() const
{
   return bblock_start_const(this);
}

inline fs_inst *
bblock_t::end()
{
   return bblock_end(this);
}

inline const fs_inst *
bblock_t::end() const
{
   return bblock_end_const(this);
}

inline bblock_t *
bblock_t::next()
{
   return bblock_next(this);
}

inline const bblock_t *
bblock_t::next() const
{
   return bblock_next_const(this);
}

inline bblock_t *
bblock_t::prev()
{
   return bblock_prev(this);
}

inline const bblock_t *
bblock_t::prev() const
{
   return bblock_prev_const(this);
}

inline bool
bblock_t::starts_with_control_flow() const
{
   return bblock_starts_with_control_flow(this);
}

inline bool
bblock_t::ends_with_control_flow() const
{
   return bblock_ends_with_control_flow(this);
}

inline fs_inst *
bblock_t::first_non_control_flow_inst()
{
   return bblock_first_non_control_flow_inst(this);
}

inline fs_inst *
bblock_t::last_non_control_flow_inst()
{
   return bblock_last_non_control_flow_inst(this);
}

struct cfg_t {
   DECLARE_RALLOC_CXX_OPERATORS(cfg_t)

   cfg_t(const fs_visitor *s, exec_list *instructions);
   ~cfg_t();

   void remove_block(bblock_t *block);

   bblock_t *first_block();
   const bblock_t *first_block() const;
   bblock_t *last_block();
   const bblock_t *last_block() const;

   bblock_t *new_block();
   void set_next_block(bblock_t **cur, bblock_t *block, int ip);
   void make_block_array();

   void dump(FILE *file = stderr);
   void dump_cfg();

#ifdef NDEBUG
   void validate(UNUSED const char *stage_abbrev) { }
#else
   void validate(const char *stage_abbrev);
#endif

   /**
    * Propagate bblock_t::end_ip_delta data through the CFG.
    */
   inline void adjust_block_ips();

   const struct fs_visitor *s;
   void *mem_ctx;

   /** Ordered list (by ip) of basic blocks */
   struct exec_list block_list;
   struct bblock_t **blocks;
   int num_blocks;
};
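
/* Illustrative sketch of consuming a cfg_t once it has been built: the
 * blocks array filled in by make_block_array() holds the same blocks as
 * block_list, so a pass can either walk the list with the foreach_block()
 * macro defined further below or index the blocks directly.  The helper
 * below (and the "EX" stage abbreviation) is hypothetical:
 *
 *    static void
 *    print_block_ranges(cfg_t *cfg)
 *    {
 *       cfg->validate("EX");
 *
 *       for (int i = 0; i < cfg->num_blocks; i++) {
 *          const bblock_t *block = cfg->blocks[i];
 *          fprintf(stderr, "block %d: IPs [%d, %d]\n",
 *                  block->num, block->start_ip, block->end_ip);
 *       }
 *    }
 */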

static inline struct bblock_t *
cfg_first_block(struct cfg_t *cfg)
{
   return (struct bblock_t *)exec_list_get_head(&cfg->block_list);
}

static inline const struct bblock_t *
cfg_first_block_const(const struct cfg_t *cfg)
{
   return (const struct bblock_t *)exec_list_get_head_const(&cfg->block_list);
}

static inline struct bblock_t *
cfg_last_block(struct cfg_t *cfg)
{
   return (struct bblock_t *)exec_list_get_tail(&cfg->block_list);
}

static inline const struct bblock_t *
cfg_last_block_const(const struct cfg_t *cfg)
{
   return (const struct bblock_t *)exec_list_get_tail_const(&cfg->block_list);
}

inline bblock_t *
cfg_t::first_block()
{
   return cfg_first_block(this);
}

const inline bblock_t *
cfg_t::first_block() const
{
   return cfg_first_block_const(this);
}

inline bblock_t *
cfg_t::last_block()
{
   return cfg_last_block(this);
}

const inline bblock_t *
cfg_t::last_block() const
{
   return cfg_last_block_const(this);
}

/* Note that this is implemented with a double for loop -- break will
 * break from the inner loop only!
 */
#define foreach_block_and_inst(__block, __type, __inst, __cfg) \
   foreach_block (__block, __cfg)                              \
      foreach_inst_in_block (__type, __inst, __block)

/* Note that this is implemented with a double for loop -- break will
 * break from the inner loop only!
 */
#define foreach_block_and_inst_safe(__block, __type, __inst, __cfg) \
   foreach_block_safe (__block, __cfg)                              \
      foreach_inst_in_block_safe (__type, __inst, __block)
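
/* Because foreach_block_and_inst() and foreach_block_and_inst_safe() expand
 * to two nested for loops, a plain 'break' in the body only leaves the inner
 * per-instruction loop and iteration continues with the next block.  A
 * hypothetical search helper that wants to stop completely can simply
 * return instead (match() is a made-up predicate):
 *
 *    static fs_inst *
 *    find_first_match(cfg_t *cfg)
 *    {
 *       foreach_block_and_inst(block, fs_inst, inst, cfg) {
 *          if (match(inst))
 *             return inst;
 *       }
 *       return NULL;
 *    }
 *
 * When an early exit is needed without returning, write the two loops out
 * explicitly and test a flag in the outer loop.
 */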

#define foreach_block(__block, __cfg)                          \
   foreach_list_typed (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_reverse(__block, __cfg)                  \
   foreach_list_typed_reverse (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_safe(__block, __cfg)                     \
   foreach_list_typed_safe (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_reverse_safe(__block, __cfg)             \
   foreach_list_typed_reverse_safe (bblock_t, __block, link, &(__cfg)->block_list)
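
/* The _safe variants cache the next (or previous) block before the loop body
 * runs, so the current block may be unlinked from block_list while
 * iterating.  A minimal sketch, assuming a block_is_dead() predicate
 * supplied by the caller:
 *
 *    foreach_block_safe(block, cfg) {
 *       if (block_is_dead(block))
 *          cfg->remove_block(block);
 *    }
 */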

#define foreach_inst_in_block(__type, __inst, __block)         \
   foreach_in_list(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_safe(__type, __inst, __block)    \
   for (__type *__inst = (__type *)__block->instructions.head_sentinel.next, \
               *__next = (__type *)__inst->next;               \
        __next != NULL;                                        \
        __inst = __next,                                       \
        __next = (__type *)__next->next)

#define foreach_inst_in_block_reverse(__type, __inst, __block) \
   foreach_in_list_reverse(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_reverse_safe(__type, __inst, __block) \
   foreach_in_list_reverse_safe(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_starting_from(__type, __scan_inst, __inst) \
   for (__type *__scan_inst = (__type *)__inst->next;          \
        !__scan_inst->is_tail_sentinel();                      \
        __scan_inst = (__type *)__scan_inst->next)

#define foreach_inst_in_block_reverse_starting_from(__type, __scan_inst, __inst) \
   for (__type *__scan_inst = (__type *)__inst->prev;          \
        !__scan_inst->is_head_sentinel();                      \
        __scan_inst = (__type *)__scan_inst->prev)

inline void
cfg_t::adjust_block_ips()
{
   int delta = 0;

   foreach_block(block, this) {
      block->start_ip += delta;
      block->end_ip += delta;

      delta += block->end_ip_delta;

      block->end_ip_delta = 0;
   }
}
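
/* A sketch of how end_ip_delta is intended to be used: a pass that adds or
 * removes instructions in several blocks updates each affected block's own
 * IPs, records the net change in that block's end_ip_delta (in practice this
 * bookkeeping usually lives in the instruction insertion/removal helpers),
 * and then propagates all of the deferred changes at once instead of
 * renumbering every later block after each edit.  Hypothetical caller:
 *
 *    bool progress = run_some_pass(cfg);     (run_some_pass is made up)
 *    if (progress)
 *       cfg->adjust_block_ips();
 *
 * As implemented above, each block's start_ip/end_ip is shifted by the sum
 * of the end_ip_delta values of all preceding blocks, and every delta is
 * reset to zero afterwards.
 */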

namespace brw {
   /**
    * Immediate dominator tree analysis of a shader.
    */
   struct idom_tree {
      idom_tree(const fs_visitor *s);
      ~idom_tree();

      bool
      validate(const fs_visitor *) const
      {
         /* FINISHME */
         return true;
      }

      analysis_dependency_class
      dependency_class() const
      {
         return DEPENDENCY_BLOCKS;
      }

      const bblock_t *
      parent(const bblock_t *b) const
      {
         assert(unsigned(b->num) < num_parents);
         return parents[b->num];
      }

      bblock_t *
      parent(bblock_t *b) const
      {
         assert(unsigned(b->num) < num_parents);
         return parents[b->num];
      }

      bblock_t *
      intersect(bblock_t *b1, bblock_t *b2) const;

      /**
       * Returns true if block `a` dominates block `b`.
       */
      bool
      dominates(const bblock_t *a, const bblock_t *b) const
      {
         while (a != b) {
            if (b->num == 0)
               return false;

            b = parent(b);
         }
         return true;
      }

      void dump(FILE *file = stderr) const;

   private:
      unsigned num_parents;
      bblock_t **parents;
   };
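
   /* Illustrative use of the dominance query above: dominates() walks the
    * immediate-dominator chain upwards from 'b' until it either reaches 'a'
    * or hits the entry block (block 0).  A minimal sketch, assuming an
    * idom_tree has been constructed for the shader being compiled (v is a
    * hypothetical fs_visitor pointer):
    *
    *    const brw::idom_tree idom(v);
    *    bool ok = idom.dominates(def_block, use_block);
    *
    * Here 'ok' being true means every path from the entry block to
    * use_block passes through def_block, e.g. a value defined in def_block
    * has always been computed by the time use_block executes.
    */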
}

#endif

#endif /* BRW_CFG_H */