xref: /aosp_15_r20/external/mesa3d/src/amd/compiler/aco_insert_waitcnt.cpp (revision 6104692788411f58d303aa86923a9ff6ecaded22)
/*
 * Copyright © 2018 Valve Corporation
 *
 * SPDX-License-Identifier: MIT
 */

#include "aco_builder.h"
#include "aco_ir.h"

#include "common/sid.h"

#include <map>
#include <stack>
#include <vector>
#include <optional>

namespace aco {

namespace {

/**
 * The general idea of this pass is:
 * The CFG is traversed in reverse postorder (forward) and loops are processed
 * several times until no progress is made.
 * Per BB, two wait_ctx are maintained: an in-context and an out-context.
 * The in-context is the joined out-contexts of the predecessors.
 * The context contains a map: gpr -> wait_entry
 * consisting of the information about the cnt values to be waited for.
 * Note: After merge-nodes, it might occur that for the same register
 *       multiple cnt values are to be waited for.
 *
 * The values are updated according to the encountered instructions:
 * - additional events increment the counter of waits of the same type
 * - or, when a wait is emitted, gprs whose counters are satisfied by it are erased.
 */

// TODO: do a more clever insertion of wait_cnt (lgkm_cnt)
// when there is a load followed by a use of a previous load

/* Instructions of the same event will finish in-order except for smem
 * and maybe flat. Instructions of different events may not finish in-order. */
enum wait_event : uint32_t {
   event_smem = 1 << 0,
   event_lds = 1 << 1,
   event_gds = 1 << 2,
   event_vmem = 1 << 3,
   event_vmem_store = 1 << 4, /* GFX10+ */
   event_flat = 1 << 5,
   event_exp_pos = 1 << 6,
   event_exp_param = 1 << 7,
   event_exp_mrt_null = 1 << 8,
   event_gds_gpr_lock = 1 << 9,
   event_vmem_gpr_lock = 1 << 10,
   event_sendmsg = 1 << 11,
   event_ldsdir = 1 << 12,
   event_vmem_sample = 1 << 13, /* GFX12+ */
   event_vmem_bvh = 1 << 14,    /* GFX12+ */
   num_events = 15,
};

enum counter_type : uint8_t {
   counter_exp = 1 << wait_type_exp,
   counter_lgkm = 1 << wait_type_lgkm,
   counter_vm = 1 << wait_type_vm,
   counter_vs = 1 << wait_type_vs,
   counter_sample = 1 << wait_type_sample,
   counter_bvh = 1 << wait_type_bvh,
   counter_km = 1 << wait_type_km,
   num_counters = wait_type_num,
};

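/* Per-register wait state: which events and counters are still outstanding for
 * this gpr, and the counter values that have to be waited on before it is safe
 * to overwrite the register or, if wait_on_read is set, to read it. */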
struct wait_entry {
   wait_imm imm;
   uint32_t events;  /* use wait_event notion */
   uint8_t counters; /* use counter_type notion */
   bool wait_on_read : 1;
   bool logical : 1;
   uint8_t vmem_types : 4; /* use vmem_type notion. for counter_vm. */

   wait_entry(wait_event event_, wait_imm imm_, uint8_t counters_, bool logical_,
              bool wait_on_read_)
       : imm(imm_), events(event_), counters(counters_), wait_on_read(wait_on_read_),
         logical(logical_), vmem_types(0)
   {}

   bool join(const wait_entry& other)
   {
      bool changed = (other.events & ~events) || (other.counters & ~counters) ||
                     (other.wait_on_read && !wait_on_read) || (other.vmem_types & ~vmem_types) ||
                     (!other.logical && logical);
      events |= other.events;
      counters |= other.counters;
      changed |= imm.combine(other.imm);
      wait_on_read |= other.wait_on_read;
      vmem_types |= other.vmem_types;
      logical &= other.logical;
      return changed;
   }

   void remove_wait(wait_type type, uint32_t type_events)
   {
      counters &= ~(1 << type);
      imm[type] = wait_imm::unset_counter;

      events &= ~type_events | event_flat;
      if (!(counters & counter_lgkm) && !(counters & counter_vm))
         events &= ~(type_events & event_flat);

      if (type == wait_type_vm)
         vmem_types = 0;
   }

   UNUSED void print(FILE* output) const
   {
      fprintf(output, "logical: %u\n", logical);
      imm.print(output);
      if (events)
         fprintf(output, "events: %u\n", events);
      if (counters)
         fprintf(output, "counters: %u\n", counters);
      if (!wait_on_read)
         fprintf(output, "wait_on_read: %u\n", wait_on_read);
      if (!logical)
         fprintf(output, "logical: %u\n", logical);
      if (vmem_types)
         fprintf(output, "vmem_types: %u\n", vmem_types);
   }
};

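/* Precomputed per-GFX-level information: which events feed which wait counter,
 * the maximum value of each counter, and which events complete out of order. */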
struct target_info {
   wait_imm max_cnt;
   uint32_t events[wait_type_num] = {};
   uint16_t unordered_events;

   target_info(enum amd_gfx_level gfx_level)
   {
      max_cnt = wait_imm::max(gfx_level);
      for (unsigned i = 0; i < wait_type_num; i++)
         max_cnt[i] = max_cnt[i] ? max_cnt[i] - 1 : 0;

      events[wait_type_exp] = event_exp_pos | event_exp_param | event_exp_mrt_null |
                              event_gds_gpr_lock | event_vmem_gpr_lock | event_ldsdir;
      events[wait_type_lgkm] = event_smem | event_lds | event_gds | event_flat | event_sendmsg;
      events[wait_type_vm] = event_vmem | event_flat;
      events[wait_type_vs] = event_vmem_store;
      if (gfx_level >= GFX12) {
         events[wait_type_sample] = event_vmem_sample;
         events[wait_type_bvh] = event_vmem_bvh;
         events[wait_type_km] = event_smem | event_sendmsg;
         events[wait_type_lgkm] &= ~events[wait_type_km];
      }

      for (unsigned i = 0; i < wait_type_num; i++) {
         u_foreach_bit (j, events[i])
            counters[j] |= (1 << i);
      }

      unordered_events = event_smem | (gfx_level < GFX10 ? event_flat : 0);
   }

   uint8_t get_counters_for_event(wait_event event) const { return counters[ffs(event) - 1]; }

private:
   /* Bitfields of counters affected by each event */
   uint8_t counters[num_events] = {};
};

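/* Per-BB context: tracks which counters may be non-zero, outstanding flat/SMEM
 * store state, per-storage-class barrier wait values, and the per-register
 * wait_entry map. */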
struct wait_ctx {
   Program* program;
   enum amd_gfx_level gfx_level;
   const target_info* info;

   uint32_t nonzero = 0;
   bool pending_flat_lgkm = false;
   bool pending_flat_vm = false;
   bool pending_s_buffer_store = false; /* GFX10 workaround */

   wait_imm barrier_imm[storage_count];
   uint16_t barrier_events[storage_count] = {}; /* use wait_event notion */

   std::map<PhysReg, wait_entry> gpr_map;

   wait_ctx() {}
   wait_ctx(Program* program_, const target_info* info_)
       : program(program_), gfx_level(program_->gfx_level), info(info_)
   {}

   bool join(const wait_ctx* other, bool logical)
   {
      bool changed = (other->pending_flat_lgkm && !pending_flat_lgkm) ||
                     (other->pending_flat_vm && !pending_flat_vm) || (~nonzero & other->nonzero);

      nonzero |= other->nonzero;
      pending_flat_lgkm |= other->pending_flat_lgkm;
      pending_flat_vm |= other->pending_flat_vm;
      pending_s_buffer_store |= other->pending_s_buffer_store;

      for (const auto& entry : other->gpr_map) {
         if (entry.second.logical != logical)
            continue;

         using iterator = std::map<PhysReg, wait_entry>::iterator;
         const std::pair<iterator, bool> insert_pair = gpr_map.insert(entry);
         if (insert_pair.second) {
            changed = true;
         } else {
            changed |= insert_pair.first->second.join(entry.second);
         }
      }

      for (unsigned i = 0; i < storage_count; i++) {
         changed |= barrier_imm[i].combine(other->barrier_imm[i]);
         changed |= (other->barrier_events[i] & ~barrier_events[i]) != 0;
         barrier_events[i] |= other->barrier_events[i];
      }

      return changed;
   }

   UNUSED void print(FILE* output) const
   {
      for (unsigned i = 0; i < wait_type_num; i++)
         fprintf(output, "nonzero[%u]: %u\n", i, nonzero & (1 << i) ? 1 : 0);
      fprintf(output, "pending_flat_lgkm: %u\n", pending_flat_lgkm);
      fprintf(output, "pending_flat_vm: %u\n", pending_flat_vm);
      for (const auto& entry : gpr_map) {
         fprintf(output, "gpr_map[%c%u] = {\n", entry.first.reg() >= 256 ? 'v' : 's',
                 entry.first.reg() & 0xff);
         entry.second.print(output);
         fprintf(output, "}\n");
      }

      for (unsigned i = 0; i < storage_count; i++) {
         if (!barrier_imm[i].empty() || barrier_events[i]) {
            fprintf(output, "barriers[%u] = {\n", i);
            barrier_imm[i].print(output);
            fprintf(output, "events: %u\n", barrier_events[i]);
            fprintf(output, "}\n");
         }
      }
   }
};

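/* Map a VMEM instruction to the wait event it generates: stores use the separate
 * store event on GFX10+, and GFX12+ splits image samples and BVH operations off
 * into their own events/counters. */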
wait_event
get_vmem_event(wait_ctx& ctx, Instruction* instr, uint8_t type)
{
   if (instr->definitions.empty() && ctx.gfx_level >= GFX10)
      return event_vmem_store;
   wait_event ev = event_vmem;
   if (ctx.gfx_level >= GFX12 && type != vmem_nosampler)
      ev = type == vmem_bvh ? event_vmem_bvh : event_vmem_sample;
   return ev;
}

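/* Gather the counter values that must be awaited before this instruction may
 * execute: any register it reads or writes that still has an outstanding entry
 * contributes its wait_imm, except where same-event ordering makes the wait
 * redundant. */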
void
check_instr(wait_ctx& ctx, wait_imm& wait, Instruction* instr)
{
   for (const Operand op : instr->operands) {
      if (op.isConstant() || op.isUndefined())
         continue;

      /* check consecutively read gprs */
      for (unsigned j = 0; j < op.size(); j++) {
         std::map<PhysReg, wait_entry>::iterator it = ctx.gpr_map.find(PhysReg{op.physReg() + j});
         if (it != ctx.gpr_map.end() && it->second.wait_on_read)
            wait.combine(it->second.imm);
      }
   }

   for (const Definition& def : instr->definitions) {
      /* check consecutively written gprs */
      for (unsigned j = 0; j < def.getTemp().size(); j++) {
         PhysReg reg{def.physReg() + j};

         std::map<PhysReg, wait_entry>::iterator it = ctx.gpr_map.find(reg);
         if (it == ctx.gpr_map.end())
            continue;

         wait_imm reg_imm = it->second.imm;

         /* Vector Memory reads and writes return in the order they were issued */
         uint8_t vmem_type = get_vmem_type(ctx.gfx_level, instr);
         if (vmem_type) {
            wait_event event = get_vmem_event(ctx, instr, vmem_type);
            wait_type type = (wait_type)(ffs(ctx.info->get_counters_for_event(event)) - 1);
            if ((it->second.events & ctx.info->events[type]) == event &&
                (type != wait_type_vm || it->second.vmem_types == vmem_type))
               reg_imm[type] = wait_imm::unset_counter;
         }

         /* LDS reads and writes return in the order they were issued. same for GDS */
         if (instr->isDS() && (it->second.events & ctx.info->events[wait_type_lgkm]) ==
                                 (instr->ds().gds ? event_gds : event_lds))
            reg_imm.lgkm = wait_imm::unset_counter;

         wait.combine(reg_imm);
      }
   }
}

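/* For barriers with the given semantics, combine in the per-storage-class wait
 * values so that outstanding memory operations become visible at the requested
 * scope before the barrier completes. */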
void
perform_barrier(wait_ctx& ctx, wait_imm& imm, memory_sync_info sync, unsigned semantics)
{
   sync_scope subgroup_scope =
      ctx.program->workgroup_size <= ctx.program->wave_size ? scope_workgroup : scope_subgroup;
   if ((sync.semantics & semantics) && sync.scope > subgroup_scope) {
      unsigned storage = sync.storage;
      while (storage) {
         unsigned idx = u_bit_scan(&storage);

         /* LDS is private to the workgroup */
         sync_scope bar_scope_lds = MIN2(sync.scope, scope_workgroup);

         uint16_t events = ctx.barrier_events[idx];
         if (bar_scope_lds <= subgroup_scope)
            events &= ~event_lds;

         /* in non-WGP, the L1 (L0 on GFX10+) cache keeps all memory operations
          * in-order for the same workgroup */
         if (!ctx.program->wgp_mode && sync.scope <= scope_workgroup)
            events &= ~(event_vmem | event_vmem_store | event_smem);

         if (events)
            imm.combine(ctx.barrier_imm[idx]);
      }
   }
}

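/* Request a zero wait on every counter that may currently be non-zero. */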
void
force_waitcnt(wait_ctx& ctx, wait_imm& imm)
{
   u_foreach_bit (i, ctx.nonzero)
      imm[i] = 0;
}

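/* Compute the wait that must be emitted before this instruction and update the
 * context accordingly: barrier and gpr entries satisfied by the wait are pruned,
 * and pending flat/SMEM-store state is cleared once the relevant counters reach
 * zero. */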
void
kill(wait_imm& imm, Instruction* instr, wait_ctx& ctx, memory_sync_info sync_info)
{
   if (instr->opcode == aco_opcode::s_setpc_b64 || (debug_flags & DEBUG_FORCE_WAITCNT)) {
      /* Force emitting waitcnt states right after the instruction if there is
       * something to wait for. This is also applied for s_setpc_b64 to ensure
       * waitcnt states are inserted before jumping to the PS epilog.
       */
      force_waitcnt(ctx, imm);
   }

   /* Make sure POPS coherent memory accesses have reached the L2 cache before letting the
    * overlapping waves proceed into the ordered section.
    */
   if (ctx.program->has_pops_overlapped_waves_wait &&
       (ctx.gfx_level >= GFX11 ? instr->isEXP() && instr->exp().done
                               : (instr->opcode == aco_opcode::s_sendmsg &&
                                  instr->salu().imm == sendmsg_ordered_ps_done))) {
      uint8_t c = counter_vm | counter_vs;
      /* Await SMEM loads too, as it's possible for an application to create them, e.g. using a
       * scalarization loop - pointless and suboptimal for an inherently divergent address of
       * per-pixel data, but it can still be done at least synthetically and must be handled
       * correctly.
       */
      if (ctx.program->has_smem_buffer_or_global_loads)
         c |= counter_lgkm;

      u_foreach_bit (i, c & ctx.nonzero)
         imm[i] = 0;
   }

   check_instr(ctx, imm, instr);

   /* It's required to wait for scalar stores before "writing back" data.
    * It shouldn't cost anything anyways since we're about to do s_endpgm.
    */
   if ((ctx.nonzero & BITFIELD_BIT(wait_type_lgkm)) && instr->opcode == aco_opcode::s_dcache_wb) {
      assert(ctx.gfx_level >= GFX8);
      imm.lgkm = 0;
   }

   if (ctx.gfx_level >= GFX10 && instr->isSMEM()) {
      /* GFX10: A store followed by a load at the same address causes a problem because
       * the load doesn't load the correct values unless we wait for the store first.
       * This is NOT mitigated by an s_nop.
       *
       * TODO: Refine this when we have proper alias analysis.
       */
      if (ctx.pending_s_buffer_store && !instr->smem().definitions.empty() &&
          !instr->smem().sync.can_reorder()) {
         imm.lgkm = 0;
      }
   }

   if (instr->opcode == aco_opcode::ds_ordered_count &&
       ((instr->ds().offset1 | (instr->ds().offset0 >> 8)) & 0x1)) {
      imm.combine(ctx.barrier_imm[ffs(storage_gds) - 1]);
   }

   if (instr->opcode == aco_opcode::p_barrier)
      perform_barrier(ctx, imm, instr->barrier().sync, semantic_acqrel);
   else
      perform_barrier(ctx, imm, sync_info, semantic_release);

   if (!imm.empty()) {
      if (ctx.pending_flat_vm && imm.vm != wait_imm::unset_counter)
         imm.vm = 0;
      if (ctx.pending_flat_lgkm && imm.lgkm != wait_imm::unset_counter)
         imm.lgkm = 0;

      /* reset counters */
      for (unsigned i = 0; i < wait_type_num; i++)
         ctx.nonzero &= imm[i] == 0 ? ~BITFIELD_BIT(i) : UINT32_MAX;

      /* update barrier wait imms */
      for (unsigned i = 0; i < storage_count; i++) {
         wait_imm& bar = ctx.barrier_imm[i];
         uint16_t& bar_ev = ctx.barrier_events[i];
         for (unsigned j = 0; j < wait_type_num; j++) {
            if (bar[j] != wait_imm::unset_counter && imm[j] <= bar[j]) {
               bar[j] = wait_imm::unset_counter;
               bar_ev &= ~ctx.info->events[j] | event_flat;
            }
         }
         if (bar.vm == wait_imm::unset_counter && bar.lgkm == wait_imm::unset_counter)
            bar_ev &= ~event_flat;
      }

      /* remove all gprs with higher counter from map */
      std::map<PhysReg, wait_entry>::iterator it = ctx.gpr_map.begin();
      while (it != ctx.gpr_map.end()) {
         for (unsigned i = 0; i < wait_type_num; i++) {
            if (imm[i] != wait_imm::unset_counter && imm[i] <= it->second.imm[i])
               it->second.remove_wait((wait_type)i, ctx.info->events[i]);
         }
         if (!it->second.counters)
            it = ctx.gpr_map.erase(it);
         else
            it++;
      }
   }

   if (imm.vm == 0)
      ctx.pending_flat_vm = false;
   if (imm.lgkm == 0) {
      ctx.pending_flat_lgkm = false;
      ctx.pending_s_buffer_store = false;
   }
}

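/* Record a newly issued event per storage class: classes written by this
 * instruction get a zero wait value for the event's counters, while other
 * classes whose outstanding events are ordered with this one have their wait
 * values bumped by one. */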
void
update_barrier_imm(wait_ctx& ctx, uint8_t counters, wait_event event, memory_sync_info sync)
{
   for (unsigned i = 0; i < storage_count; i++) {
      wait_imm& bar = ctx.barrier_imm[i];
      uint16_t& bar_ev = ctx.barrier_events[i];
      if (sync.storage & (1 << i) && !(sync.semantics & semantic_private)) {
         bar_ev |= event;
         u_foreach_bit (j, counters)
            bar[j] = 0;
      } else if (!(bar_ev & ctx.info->unordered_events) && !(ctx.info->unordered_events & event)) {
         u_foreach_bit (j, counters) {
            if (bar[j] != wait_imm::unset_counter && (bar_ev & ctx.info->events[j]) == event)
               bar[j] = std::min<uint16_t>(bar[j] + 1, ctx.info->max_cnt[j]);
         }
      }
   }
}

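/* Account for a newly issued event: mark its counters as possibly non-zero and
 * increment the per-register wait values of older, in-order entries of the same
 * event. */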
void
update_counters(wait_ctx& ctx, wait_event event, memory_sync_info sync = memory_sync_info())
{
   uint8_t counters = ctx.info->get_counters_for_event(event);

   ctx.nonzero |= counters;

   update_barrier_imm(ctx, counters, event, sync);

   if (ctx.info->unordered_events & event)
      return;

   if (ctx.pending_flat_lgkm)
      counters &= ~counter_lgkm;
   if (ctx.pending_flat_vm)
      counters &= ~counter_vm;

   for (std::pair<const PhysReg, wait_entry>& e : ctx.gpr_map) {
      wait_entry& entry = e.second;

      if (entry.events & ctx.info->unordered_events)
         continue;

      assert(entry.events);

      u_foreach_bit (i, counters) {
         if ((entry.events & ctx.info->events[i]) == event)
            entry.imm[i] = std::min<uint16_t>(entry.imm[i] + 1, ctx.info->max_cnt[i]);
      }
   }
}

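/* FLAT loads before GFX10 use both vm_cnt and lgkm_cnt and complete out of
 * order, so any outstanding register entry on those counters now requires a
 * wait for the counter to reach zero. */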
void
update_counters_for_flat_load(wait_ctx& ctx, memory_sync_info sync = memory_sync_info())
{
   assert(ctx.gfx_level < GFX10);

   ctx.nonzero |= BITFIELD_BIT(wait_type_lgkm) | BITFIELD_BIT(wait_type_vm);

   update_barrier_imm(ctx, counter_vm | counter_lgkm, event_flat, sync);

   for (std::pair<const PhysReg, wait_entry>& e : ctx.gpr_map) {
      if (e.second.counters & counter_vm)
         e.second.imm.vm = 0;
      if (e.second.counters & counter_lgkm)
         e.second.imm.lgkm = 0;
   }
   ctx.pending_flat_lgkm = true;
   ctx.pending_flat_vm = true;
}

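/* Create (or merge into) gpr_map entries for all registers covered by reg/rc,
 * starting with a zero wait value on every counter the event maps to. */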
void
insert_wait_entry(wait_ctx& ctx, PhysReg reg, RegClass rc, wait_event event, bool wait_on_read,
                  uint8_t vmem_types = 0, bool force_linear = false)
{
   uint16_t counters = ctx.info->get_counters_for_event(event);
   wait_imm imm;
   u_foreach_bit (i, counters)
      imm[i] = 0;

   wait_entry new_entry(event, imm, counters, !rc.is_linear() && !force_linear, wait_on_read);
   if (counters & counter_vm)
      new_entry.vmem_types |= vmem_types;

   for (unsigned i = 0; i < rc.size(); i++) {
      auto it = ctx.gpr_map.emplace(PhysReg{reg.reg() + i}, new_entry);
      if (!it.second)
         it.first->second.join(new_entry);
   }
}

void
insert_wait_entry(wait_ctx& ctx, Operand op, wait_event event, uint8_t vmem_types = 0)
{
   if (!op.isConstant() && !op.isUndefined())
      insert_wait_entry(ctx, op.physReg(), op.regClass(), event, false, vmem_types);
}

void
insert_wait_entry(wait_ctx& ctx, Definition def, wait_event event, uint8_t vmem_types = 0)
{
   /* We can't safely write to unwritten destination VGPR lanes with DS/VMEM on GFX11 without
    * waiting for the load to finish.
    */
   uint32_t ds_vmem_events = event_lds | event_gds | event_vmem | event_flat;
   bool force_linear = ctx.gfx_level >= GFX11 && (event & ds_vmem_events);

   insert_wait_entry(ctx, def.physReg(), def.regClass(), event, true, vmem_types, force_linear);
}

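/* Update the context for the events generated by this instruction and add wait
 * entries for its definitions, as well as for registers that must stay
 * unmodified until the operation has read them (exports, GDS operands, GFX6
 * VMEM data operands). */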
void
gen(Instruction* instr, wait_ctx& ctx)
{
   switch (instr->format) {
   case Format::EXP: {
      Export_instruction& exp_instr = instr->exp();

      wait_event ev;
      if (exp_instr.dest <= 9)
         ev = event_exp_mrt_null;
      else if (exp_instr.dest <= 15)
         ev = event_exp_pos;
      else
         ev = event_exp_param;
      update_counters(ctx, ev);

      /* insert new entries for exported vgprs */
      for (unsigned i = 0; i < 4; i++) {
         if (exp_instr.enabled_mask & (1 << i)) {
            unsigned idx = exp_instr.compressed ? i >> 1 : i;
            assert(idx < exp_instr.operands.size());
            insert_wait_entry(ctx, exp_instr.operands[idx], ev);
         }
      }
      insert_wait_entry(ctx, exec, s2, ev, false);
      break;
   }
   case Format::FLAT: {
      FLAT_instruction& flat = instr->flat();
      if (ctx.gfx_level < GFX10 && !instr->definitions.empty())
         update_counters_for_flat_load(ctx, flat.sync);
      else
         update_counters(ctx, event_flat, flat.sync);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], event_flat);
      break;
   }
   case Format::SMEM: {
      SMEM_instruction& smem = instr->smem();
      update_counters(ctx, event_smem, smem.sync);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], event_smem);
      else if (ctx.gfx_level >= GFX10 && !smem.sync.can_reorder())
         ctx.pending_s_buffer_store = true;

      break;
   }
   case Format::DS: {
      DS_instruction& ds = instr->ds();
      update_counters(ctx, ds.gds ? event_gds : event_lds, ds.sync);
      if (ds.gds)
         update_counters(ctx, event_gds_gpr_lock);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], ds.gds ? event_gds : event_lds);

      if (ds.gds) {
         for (const Operand& op : instr->operands)
            insert_wait_entry(ctx, op, event_gds_gpr_lock);
         insert_wait_entry(ctx, exec, s2, event_gds_gpr_lock, false);
      }
      break;
   }
   case Format::LDSDIR: {
      LDSDIR_instruction& ldsdir = instr->ldsdir();
      update_counters(ctx, event_ldsdir, ldsdir.sync);
      insert_wait_entry(ctx, instr->definitions[0], event_ldsdir);
      break;
   }
   case Format::MUBUF:
   case Format::MTBUF:
   case Format::MIMG:
   case Format::GLOBAL:
   case Format::SCRATCH: {
      uint8_t type = get_vmem_type(ctx.gfx_level, instr);
      wait_event ev = get_vmem_event(ctx, instr, type);

      update_counters(ctx, ev, get_sync_info(instr));

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], ev, type);

      if (ctx.gfx_level == GFX6 && instr->format != Format::MIMG && instr->operands.size() == 4) {
         update_counters(ctx, event_vmem_gpr_lock);
         insert_wait_entry(ctx, instr->operands[3], event_vmem_gpr_lock);
      } else if (ctx.gfx_level == GFX6 && instr->isMIMG() && !instr->operands[2].isUndefined()) {
         update_counters(ctx, event_vmem_gpr_lock);
         insert_wait_entry(ctx, instr->operands[2], event_vmem_gpr_lock);
      }

      break;
   }
   case Format::SOPP: {
      if (instr->opcode == aco_opcode::s_sendmsg || instr->opcode == aco_opcode::s_sendmsghalt)
         update_counters(ctx, event_sendmsg);
      break;
   }
   case Format::SOP1: {
      if (instr->opcode == aco_opcode::s_sendmsg_rtn_b32 ||
          instr->opcode == aco_opcode::s_sendmsg_rtn_b64) {
         update_counters(ctx, event_sendmsg);
         insert_wait_entry(ctx, instr->definitions[0], event_sendmsg);
      }
      break;
   }
   default: break;
   }
}

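/* Emit the accumulated wait as actual instructions: separate s_wait_*cnt
 * instructions (with combined loadcnt/storecnt+dscnt forms) on GFX12+, or
 * s_waitcnt/s_waitcnt_vscnt on older generations. The imm is reset afterwards. */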
void
emit_waitcnt(wait_ctx& ctx, std::vector<aco_ptr<Instruction>>& instructions, wait_imm& imm)
{
   Builder bld(ctx.program, &instructions);

   if (ctx.gfx_level >= GFX12) {
      if (imm.vm != wait_imm::unset_counter && imm.lgkm != wait_imm::unset_counter) {
         bld.sopp(aco_opcode::s_wait_loadcnt_dscnt, (imm.vm << 8) | imm.lgkm);
         imm.vm = wait_imm::unset_counter;
         imm.lgkm = wait_imm::unset_counter;
      }

      if (imm.vs != wait_imm::unset_counter && imm.lgkm != wait_imm::unset_counter) {
         bld.sopp(aco_opcode::s_wait_storecnt_dscnt, (imm.vs << 8) | imm.lgkm);
         imm.vs = wait_imm::unset_counter;
         imm.lgkm = wait_imm::unset_counter;
      }

      aco_opcode op[wait_type_num];
      op[wait_type_exp] = aco_opcode::s_wait_expcnt;
      op[wait_type_lgkm] = aco_opcode::s_wait_dscnt;
      op[wait_type_vm] = aco_opcode::s_wait_loadcnt;
      op[wait_type_vs] = aco_opcode::s_wait_storecnt;
      op[wait_type_sample] = aco_opcode::s_wait_samplecnt;
      op[wait_type_bvh] = aco_opcode::s_wait_bvhcnt;
      op[wait_type_km] = aco_opcode::s_wait_kmcnt;

      for (unsigned i = 0; i < wait_type_num; i++) {
         if (imm[i] != wait_imm::unset_counter)
            bld.sopp(op[i], imm[i]);
      }
   } else {
      if (imm.vs != wait_imm::unset_counter) {
         assert(ctx.gfx_level >= GFX10);
         bld.sopk(aco_opcode::s_waitcnt_vscnt, Operand(sgpr_null, s1), imm.vs);
         imm.vs = wait_imm::unset_counter;
      }
      if (!imm.empty())
         bld.sopp(aco_opcode::s_waitcnt, imm.pack(ctx.gfx_level));
   }
   imm = wait_imm();
}

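/* Returns false if this instruction reads a register written earlier in the
 * clause (a read-after-write hazard); also records the registers it writes. */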
bool
check_clause_raw(std::bitset<512>& regs_written, Instruction* instr)
{
   for (Operand op : instr->operands) {
      if (op.isConstant())
         continue;
      for (unsigned i = 0; i < op.size(); i++) {
         if (regs_written[op.physReg().reg() + i])
            return false;
      }
   }

   for (Definition def : instr->definitions) {
      for (unsigned i = 0; i < def.size(); i++)
         regs_written[def.physReg().reg() + i] = 1;
   }

   return true;
}

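/* Rewrite one block: for each instruction, combine the waits it requires (also
 * looking ahead over a clause so that an inserted wait never splits it), emit
 * them, then record the events the instruction generates. */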
void
handle_block(Program* program, Block& block, wait_ctx& ctx)
{
   std::vector<aco_ptr<Instruction>> new_instructions;

   wait_imm queued_imm;

   size_t clause_end = 0;
   for (size_t i = 0; i < block.instructions.size(); i++) {
      aco_ptr<Instruction>& instr = block.instructions[i];

      bool is_wait = queued_imm.unpack(ctx.gfx_level, instr.get());

      memory_sync_info sync_info = get_sync_info(instr.get());
      kill(queued_imm, instr.get(), ctx, sync_info);

      /* At the start of a possible clause, also emit waitcnts for each instruction to avoid
       * splitting the clause.
       */
      if (i >= clause_end || !queued_imm.empty()) {
         std::optional<std::bitset<512>> regs_written;
         for (clause_end = i + 1; clause_end < block.instructions.size(); clause_end++) {
            Instruction* next = block.instructions[clause_end].get();
            if (!should_form_clause(instr.get(), next))
               break;

            if (!regs_written) {
               regs_written.emplace();
               check_clause_raw(*regs_written, instr.get());
            }

            if (!check_clause_raw(*regs_written, next))
               break;

            kill(queued_imm, next, ctx, get_sync_info(next));
         }
      }

      gen(instr.get(), ctx);

      if (instr->format != Format::PSEUDO_BARRIER && !is_wait) {
         if (instr->isVINTERP_INREG() && queued_imm.exp != wait_imm::unset_counter) {
            instr->vinterp_inreg().wait_exp = MIN2(instr->vinterp_inreg().wait_exp, queued_imm.exp);
            queued_imm.exp = wait_imm::unset_counter;
         }

         if (!queued_imm.empty())
            emit_waitcnt(ctx, new_instructions, queued_imm);

         bool is_ordered_count_acquire =
            instr->opcode == aco_opcode::ds_ordered_count &&
            !((instr->ds().offset1 | (instr->ds().offset0 >> 8)) & 0x1);

         new_instructions.emplace_back(std::move(instr));
         perform_barrier(ctx, queued_imm, sync_info, semantic_acquire);

         if (is_ordered_count_acquire)
            queued_imm.combine(ctx.barrier_imm[ffs(storage_gds) - 1]);
      }
   }

   /* For the last block of a program that has a successor shader part, wait for all memory ops
    * to finish before jumping to the next shader part.
    */
   if (block.kind & block_kind_end_with_regs)
      force_waitcnt(ctx, queued_imm);

   if (!queued_imm.empty())
      emit_waitcnt(ctx, new_instructions, queued_imm);

   block.instructions.swap(new_instructions);
}

} /* end namespace */

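/* Entry point: traverse the CFG in reverse postorder, joining each block's
 * in-context from its predecessors' out-contexts and re-processing loops until
 * the contexts reach a fixed point, then rewrite each block with the required
 * waits. */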
void
insert_waitcnt(Program* program)
{
   target_info info(program->gfx_level);

   /* per BB ctx */
   std::vector<bool> done(program->blocks.size());
   std::vector<wait_ctx> in_ctx(program->blocks.size(), wait_ctx(program, &info));
   std::vector<wait_ctx> out_ctx(program->blocks.size(), wait_ctx(program, &info));

   std::stack<unsigned, std::vector<unsigned>> loop_header_indices;
   unsigned loop_progress = 0;

   if (program->pending_lds_access) {
      update_barrier_imm(in_ctx[0], info.get_counters_for_event(event_lds), event_lds,
                         memory_sync_info(storage_shared));
   }

   for (Definition def : program->args_pending_vmem) {
      update_counters(in_ctx[0], event_vmem);
      insert_wait_entry(in_ctx[0], def, event_vmem);
   }

   for (unsigned i = 0; i < program->blocks.size();) {
      Block& current = program->blocks[i++];

      if (current.kind & block_kind_discard_early_exit) {
         /* Because the jump to the discard early exit block may happen anywhere in a block, it's
          * not possible to join it with its predecessors this way.
          * We emit all required waits when emitting the discard block.
          */
         continue;
      }

      wait_ctx ctx = in_ctx[current.index];

      if (current.kind & block_kind_loop_header) {
         loop_header_indices.push(current.index);
      } else if (current.kind & block_kind_loop_exit) {
         bool repeat = false;
         if (loop_progress == loop_header_indices.size()) {
            i = loop_header_indices.top();
            repeat = true;
         }
         loop_header_indices.pop();
         loop_progress = std::min<unsigned>(loop_progress, loop_header_indices.size());
         if (repeat)
            continue;
      }

      bool changed = false;
      for (unsigned b : current.linear_preds)
         changed |= ctx.join(&out_ctx[b], false);
      for (unsigned b : current.logical_preds)
         changed |= ctx.join(&out_ctx[b], true);

      if (done[current.index] && !changed) {
         in_ctx[current.index] = std::move(ctx);
         continue;
      } else {
         in_ctx[current.index] = ctx;
      }

      loop_progress = std::max<unsigned>(loop_progress, current.loop_nest_depth);
      done[current.index] = true;

      handle_block(program, current, ctx);

      out_ctx[current.index] = std::move(ctx);
   }
}

} // namespace aco