/*
 * Copyright © 2021 Valve Corporation
 *
 * SPDX-License-Identifier: MIT
 */

#include "aco_builder.h"
#include "aco_ir.h"

#include <algorithm>
#include <array>
#include <bitset>
#include <vector>

namespace aco {
namespace {

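/* Physical register indices used below: SGPRs occupy [0, max_sgpr_cnt) and
 * VGPRs occupy [min_vgpr, min_vgpr + max_vgpr_cnt), so a single array of
 * max_reg_cnt entries can track the whole register file.
 */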
constexpr const size_t max_reg_cnt = 512;
constexpr const size_t max_sgpr_cnt = 128;
constexpr const size_t min_vgpr = 256;
constexpr const size_t max_vgpr_cnt = 256;

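/* Identifies an instruction by block index and position within that block.
 * The sentinel values below use block == UINT32_MAX to encode special states.
 */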
struct Idx {
   bool operator==(const Idx& other) const { return block == other.block && instr == other.instr; }
   bool operator!=(const Idx& other) const { return !operator==(other); }

   bool found() const { return block != UINT32_MAX; }

   uint32_t block;
   uint32_t instr;
};

/** Indicates that a register was not yet written in the shader. */
Idx not_written_yet{UINT32_MAX, 0};

/** Indicates that an operand is constant or undefined, not written by any instruction. */
Idx const_or_undef{UINT32_MAX, 2};

/** Indicates that a register was overwritten by different instructions in previous blocks. */
Idx overwritten_untrackable{UINT32_MAX, 3};

/** Indicates that there isn't a clear single writer, for example due to subdword operations. */
Idx overwritten_unknown_instr{UINT32_MAX, 4};

struct pr_opt_ctx {
   using Idx_array = std::array<Idx, max_reg_cnt>;

   Program* program;
   Block* current_block;
   uint32_t current_instr_idx;
   std::vector<uint16_t> uses;
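   /* For each block, maps every physical register to the location of its last writer. */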
   std::unique_ptr<Idx_array[]> instr_idx_by_regs;

   pr_opt_ctx(Program* p)
       : program(p), current_block(nullptr), current_instr_idx(0), uses(dead_code_analysis(p)),
         instr_idx_by_regs(std::unique_ptr<Idx_array[]>{new Idx_array[p->blocks.size()]})
   {}

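   /* Initialize last-writer information for registers [min_reg, min_reg + num_regs)
    * of a new block by merging the states of its linear or logical predecessors.
    */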
   ALWAYS_INLINE void reset_block_regs(const Block::edge_vec& preds, const unsigned block_index,
                                       const unsigned min_reg, const unsigned num_regs)
   {
      const unsigned num_preds = preds.size();
      const unsigned first_pred = preds[0];

      /* Copy information from the first predecessor. */
      memcpy(&instr_idx_by_regs[block_index][min_reg], &instr_idx_by_regs[first_pred][min_reg],
             num_regs * sizeof(Idx));

      /* Mark overwritten if it doesn't match with other predecessors. */
      const unsigned until_reg = min_reg + num_regs;
      for (unsigned i = 1; i < num_preds; ++i) {
         unsigned pred = preds[i];
         for (unsigned reg = min_reg; reg < until_reg; ++reg) {
            Idx& idx = instr_idx_by_regs[block_index][reg];
            if (idx == overwritten_untrackable)
               continue;

            if (idx != instr_idx_by_regs[pred][reg])
               idx = overwritten_untrackable;
         }
      }
   }

   void reset_block(Block* block)
   {
      current_block = block;
      current_instr_idx = 0;

      if (block->linear_preds.empty()) {
         std::fill(instr_idx_by_regs[block->index].begin(), instr_idx_by_regs[block->index].end(),
                   not_written_yet);
      } else if (block->kind & block_kind_loop_header) {
         /* Instructions inside the loop may overwrite registers of temporaries that are
          * not live inside the loop, but we can't detect that because we haven't processed
          * the blocks in the loop yet. As a workaround, mark all registers as untrackable.
          * TODO: Consider improving this in the future.
          */
         std::fill(instr_idx_by_regs[block->index].begin(), instr_idx_by_regs[block->index].end(),
                   overwritten_untrackable);
      } else {
         reset_block_regs(block->linear_preds, block->index, 0, max_sgpr_cnt);
         reset_block_regs(block->linear_preds, block->index, 251, 3);

         if (!block->logical_preds.empty()) {
            /* We assume that VGPRs are only read by blocks which have a logical predecessor,
             * i.e. any block that reads any VGPR has at least 1 logical predecessor.
             */
            reset_block_regs(block->logical_preds, block->index, min_vgpr, max_vgpr_cnt);
         } else {
            /* If a block has no logical predecessors, it is not part of the
             * logical CFG and therefore it also won't have any logical successors.
             * Such a block does not write any VGPRs ever.
             */
            assert(block->logical_succs.empty());
         }
      }
   }

   Instruction* get(Idx idx) { return program->blocks[idx.block].instructions[idx.instr].get(); }
};

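/* Record the current instruction as the last writer of every register it defines.
 * Subdword definitions and the SCC/scratch registers clobbered by pseudo
 * instructions are marked as written by an unknown instruction.
 */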
void
save_reg_writes(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   for (const Definition& def : instr->definitions) {
      assert(def.regClass().type() != RegType::sgpr || def.physReg().reg() <= 255);
      assert(def.regClass().type() != RegType::vgpr || def.physReg().reg() >= 256);

      unsigned dw_size = DIV_ROUND_UP(def.bytes(), 4u);
      unsigned r = def.physReg().reg();
      Idx idx{ctx.current_block->index, ctx.current_instr_idx};

      if (def.regClass().is_subdword())
         idx = overwritten_unknown_instr;

      assert((r + dw_size) <= max_reg_cnt);
      assert(def.size() == dw_size || def.regClass().is_subdword());
      std::fill(ctx.instr_idx_by_regs[ctx.current_block->index].begin() + r,
                ctx.instr_idx_by_regs[ctx.current_block->index].begin() + r + dw_size, idx);
   }
   if (instr->isPseudo() && instr->pseudo().needs_scratch_reg) {
      if (!instr->pseudo().tmp_in_scc)
         ctx.instr_idx_by_regs[ctx.current_block->index][scc] = overwritten_unknown_instr;
      ctx.instr_idx_by_regs[ctx.current_block->index][instr->pseudo().scratch_sgpr] =
         overwritten_unknown_instr;
   }
}

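/* Return the location of the instruction that last wrote the given register,
 * or overwritten_untrackable if its dwords were written by different instructions.
 */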
Idx
last_writer_idx(pr_opt_ctx& ctx, PhysReg physReg, RegClass rc)
{
   /* Verify that all of the operand's registers are written by the same instruction. */
   assert(physReg.reg() < max_reg_cnt);
   Idx instr_idx = ctx.instr_idx_by_regs[ctx.current_block->index][physReg.reg()];
   unsigned dw_size = DIV_ROUND_UP(rc.bytes(), 4u);
   unsigned r = physReg.reg();
   bool all_same =
      std::all_of(ctx.instr_idx_by_regs[ctx.current_block->index].begin() + r,
                  ctx.instr_idx_by_regs[ctx.current_block->index].begin() + r + dw_size,
                  [instr_idx](Idx i) { return i == instr_idx; });

   return all_same ? instr_idx : overwritten_untrackable;
}

Idx
last_writer_idx(pr_opt_ctx& ctx, const Operand& op)
{
   if (op.isConstant() || op.isUndefined())
      return const_or_undef;

   return last_writer_idx(ctx, op.physReg(), op.regClass());
}

/**
 * Check whether a register has been overwritten since the given location.
 * This is an important part of checking whether certain optimizations are
 * valid.
 * Note that the decision is made based on registers and not on SSA IDs.
 */
bool
is_overwritten_since(pr_opt_ctx& ctx, PhysReg reg, RegClass rc, const Idx& since_idx,
                     bool inclusive = false)
{
   /* If we didn't find an instruction, assume that the register is overwritten. */
   if (!since_idx.found())
      return true;

   /* TODO: We currently can't keep track of subdword registers. */
   if (rc.is_subdword())
      return true;

   unsigned begin_reg = reg.reg();
   unsigned end_reg = begin_reg + rc.size();
   unsigned current_block_idx = ctx.current_block->index;

   for (unsigned r = begin_reg; r < end_reg; ++r) {
      Idx& i = ctx.instr_idx_by_regs[current_block_idx][r];
      if (i == overwritten_untrackable && current_block_idx > since_idx.block)
         return true;
      else if (i == overwritten_untrackable || i == not_written_yet)
         continue;
      else if (i == overwritten_unknown_instr)
         return true;

      assert(i.found());

      bool since_instr = inclusive ? i.instr >= since_idx.instr : i.instr > since_idx.instr;
      if (i.block > since_idx.block || (i.block == since_idx.block && since_instr))
         return true;
   }

   return false;
}

bool
is_overwritten_since(pr_opt_ctx& ctx, const Definition& def, const Idx& idx, bool inclusive = false)
{
   return is_overwritten_since(ctx, def.physReg(), def.regClass(), idx, inclusive);
}

bool
is_overwritten_since(pr_opt_ctx& ctx, const Operand& op, const Idx& idx, bool inclusive = false)
{
   if (op.isConstant())
      return false;

   return is_overwritten_since(ctx, op.physReg(), op.regClass(), idx, inclusive);
}

void
try_apply_branch_vcc(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   /* We are looking for the following pattern:
    *
    * vcc = ...                      ; last_vcc_wr
    * sX, scc = s_and_bXX vcc, exec  ; op0_instr
    * (...vcc and exec must not be overwritten in between...)
    * s_cbranch_XX scc               ; instr
    *
    * If possible, the above is optimized into:
    *
    * vcc = ...                      ; last_vcc_wr
    * s_cbranch_XX vcc               ; instr modified to use vcc
    */

   /* Don't try to optimize this on GFX6-7 because SMEM may corrupt the vccz bit. */
   if (ctx.program->gfx_level < GFX8)
      return;

   if (instr->format != Format::PSEUDO_BRANCH || instr->operands.size() == 0 ||
       instr->operands[0].physReg() != scc)
      return;

   Idx op0_instr_idx = last_writer_idx(ctx, instr->operands[0]);
   Idx last_vcc_wr_idx = last_writer_idx(ctx, vcc, ctx.program->lane_mask);

   /* We need to make sure:
    * - the instructions that wrote the operand register and VCC are both found
    * - the operand register used by the branch, and VCC were both written in the current block
    * - EXEC hasn't been overwritten since the last VCC write
    * - VCC hasn't been overwritten since the operand register was written
    *   (i.e. the last VCC writer precedes the op0 writer)
    */
   if (!op0_instr_idx.found() || !last_vcc_wr_idx.found() ||
       op0_instr_idx.block != ctx.current_block->index ||
       last_vcc_wr_idx.block != ctx.current_block->index ||
       is_overwritten_since(ctx, exec, ctx.program->lane_mask, last_vcc_wr_idx) ||
       is_overwritten_since(ctx, vcc, ctx.program->lane_mask, op0_instr_idx))
      return;

   Instruction* op0_instr = ctx.get(op0_instr_idx);
   Instruction* last_vcc_wr = ctx.get(last_vcc_wr_idx);

   if ((op0_instr->opcode != aco_opcode::s_and_b64 /* wave64 */ &&
        op0_instr->opcode != aco_opcode::s_and_b32 /* wave32 */) ||
       op0_instr->operands[0].physReg() != vcc || op0_instr->operands[1].physReg() != exec ||
       !last_vcc_wr->isVOPC())
      return;

   assert(last_vcc_wr->definitions[0].tempId() == op0_instr->operands[0].tempId());

   /* Reduce the uses of the SCC def */
   ctx.uses[instr->operands[0].tempId()]--;
   /* Use VCC instead of SCC in the branch */
   instr->operands[0] = op0_instr->operands[0];
}

void
try_optimize_scc_nocompare(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   /* We are looking for the following pattern:
    *
    * s_bfe_u32 s0, s3, 0x40018  ; outputs SGPR and SCC if the SGPR != 0
    * s_cmp_eq_i32 s0, 0         ; comparison between the SGPR and 0
    * s_cbranch_scc0 BB3         ; use the result of the comparison, e.g. branch or cselect
    *
    * If possible, the above is optimized into:
    *
    * s_bfe_u32 s0, s3, 0x40018  ; original instruction
    * s_cbranch_scc1 BB3         ; modified to use SCC directly rather than the SGPR with comparison
    *
    */

   if (!instr->isSALU() && !instr->isBranch())
      return;

   if (instr->isSOPC() &&
       (instr->opcode == aco_opcode::s_cmp_eq_u32 || instr->opcode == aco_opcode::s_cmp_eq_i32 ||
        instr->opcode == aco_opcode::s_cmp_lg_u32 || instr->opcode == aco_opcode::s_cmp_lg_i32 ||
        instr->opcode == aco_opcode::s_cmp_eq_u64 || instr->opcode == aco_opcode::s_cmp_lg_u64) &&
       (instr->operands[0].constantEquals(0) || instr->operands[1].constantEquals(0)) &&
       (instr->operands[0].isTemp() || instr->operands[1].isTemp())) {
      /* Make sure the constant is always in operand 1 */
      if (instr->operands[0].isConstant())
         std::swap(instr->operands[0], instr->operands[1]);

      /* Find the writer instruction of Operand 0. */
      Idx wr_idx = last_writer_idx(ctx, instr->operands[0]);
      if (!wr_idx.found())
         return;

      Instruction* wr_instr = ctx.get(wr_idx);
      if (!wr_instr->isSALU() || wr_instr->definitions.size() < 2 ||
          wr_instr->definitions[1].physReg() != scc)
         return;

      /* Look for instructions which set SCC := (D != 0) */
      switch (wr_instr->opcode) {
      case aco_opcode::s_bfe_i32:
      case aco_opcode::s_bfe_i64:
      case aco_opcode::s_bfe_u32:
      case aco_opcode::s_bfe_u64:
      case aco_opcode::s_and_b32:
      case aco_opcode::s_and_b64:
      case aco_opcode::s_andn2_b32:
      case aco_opcode::s_andn2_b64:
      case aco_opcode::s_or_b32:
      case aco_opcode::s_or_b64:
      case aco_opcode::s_orn2_b32:
      case aco_opcode::s_orn2_b64:
      case aco_opcode::s_xor_b32:
      case aco_opcode::s_xor_b64:
      case aco_opcode::s_not_b32:
      case aco_opcode::s_not_b64:
      case aco_opcode::s_nor_b32:
      case aco_opcode::s_nor_b64:
      case aco_opcode::s_xnor_b32:
      case aco_opcode::s_xnor_b64:
      case aco_opcode::s_nand_b32:
      case aco_opcode::s_nand_b64:
      case aco_opcode::s_lshl_b32:
      case aco_opcode::s_lshl_b64:
      case aco_opcode::s_lshr_b32:
      case aco_opcode::s_lshr_b64:
      case aco_opcode::s_ashr_i32:
      case aco_opcode::s_ashr_i64:
      case aco_opcode::s_abs_i32:
      case aco_opcode::s_absdiff_i32: break;
      default: return;
      }

      /* Check whether both SCC and Operand 0 are written by the same instruction. */
      Idx sccwr_idx = last_writer_idx(ctx, scc, s1);
      if (wr_idx != sccwr_idx) {
         /* Bail out if the writer's SCC def has other users, or if its main def
          * has any user besides the current instruction.
          */
         if (ctx.uses[wr_instr->definitions[1].tempId()] ||
             ctx.uses[wr_instr->definitions[0].tempId()] > 1)
            return;

         /* Check whether the operands of the writer are overwritten. */
         for (const Operand& op : wr_instr->operands) {
            if (is_overwritten_since(ctx, op, wr_idx))
               return;
         }

         aco_opcode pulled_opcode = wr_instr->opcode;
         if (instr->opcode == aco_opcode::s_cmp_eq_u32 ||
             instr->opcode == aco_opcode::s_cmp_eq_i32 ||
             instr->opcode == aco_opcode::s_cmp_eq_u64) {
            /* When s_cmp_eq is used, it effectively inverts the SCC def.
             * However, we can't simply invert the opcodes here because that
             * would change the meaning of the program.
             */
            return;
         }

         Definition scc_def = instr->definitions[0];
         ctx.uses[wr_instr->definitions[0].tempId()]--;

         /* Copy the writer instruction, but use SCC from the current instr.
          * This means that the original instruction will be eliminated.
          */
         if (wr_instr->format == Format::SOP2) {
            instr.reset(create_instruction(pulled_opcode, Format::SOP2, 2, 2));
            instr->operands[1] = wr_instr->operands[1];
         } else if (wr_instr->format == Format::SOP1) {
            instr.reset(create_instruction(pulled_opcode, Format::SOP1, 1, 2));
         }
         instr->definitions[0] = wr_instr->definitions[0];
         instr->definitions[1] = scc_def;
         instr->operands[0] = wr_instr->operands[0];
         return;
      }

      /* Use the SCC def from wr_instr */
      ctx.uses[instr->operands[0].tempId()]--;
      instr->operands[0] = Operand(wr_instr->definitions[1].getTemp(), scc);
      ctx.uses[instr->operands[0].tempId()]++;

      /* Set the opcode and operand to 32-bit */
      instr->operands[1] = Operand::zero();
      instr->opcode =
         (instr->opcode == aco_opcode::s_cmp_eq_u32 || instr->opcode == aco_opcode::s_cmp_eq_i32 ||
          instr->opcode == aco_opcode::s_cmp_eq_u64)
            ? aco_opcode::s_cmp_eq_u32
            : aco_opcode::s_cmp_lg_u32;
   } else if ((instr->format == Format::PSEUDO_BRANCH && instr->operands.size() == 1 &&
               instr->operands[0].physReg() == scc) ||
              instr->opcode == aco_opcode::s_cselect_b32 ||
              instr->opcode == aco_opcode::s_cselect_b64) {

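      /* Here the pattern is:
       *
       * scc = s_cmp_eq_u32/s_cmp_lg_u32 scc, 0   ; comparison of SCC against 0
       * s_cbranch_scc / s_cselect                ; current instruction
       *
       * and we try to use the SCC value from before the comparison directly,
       * flipping the branch/selection when the comparison was s_cmp_eq.
       */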
      /* For cselect, operand 2 is the SCC condition */
      unsigned scc_op_idx = 0;
      if (instr->opcode == aco_opcode::s_cselect_b32 ||
          instr->opcode == aco_opcode::s_cselect_b64) {
         scc_op_idx = 2;
      }

      Idx wr_idx = last_writer_idx(ctx, instr->operands[scc_op_idx]);
      if (!wr_idx.found())
         return;

      Instruction* wr_instr = ctx.get(wr_idx);

      /* Check if we found the pattern above. */
      if (wr_instr->opcode != aco_opcode::s_cmp_eq_u32 &&
          wr_instr->opcode != aco_opcode::s_cmp_lg_u32)
         return;
      if (wr_instr->operands[0].physReg() != scc)
         return;
      if (!wr_instr->operands[1].constantEquals(0))
         return;

      /* The optimization can be unsafe when there are other users. */
      if (ctx.uses[instr->operands[scc_op_idx].tempId()] > 1)
         return;

      if (wr_instr->opcode == aco_opcode::s_cmp_eq_u32) {
         /* Flip the meaning of the instruction to correctly use the SCC. */
         if (instr->format == Format::PSEUDO_BRANCH)
            instr->opcode = instr->opcode == aco_opcode::p_cbranch_z ? aco_opcode::p_cbranch_nz
                                                                     : aco_opcode::p_cbranch_z;
         else if (instr->opcode == aco_opcode::s_cselect_b32 ||
                  instr->opcode == aco_opcode::s_cselect_b64)
            std::swap(instr->operands[0], instr->operands[1]);
         else
            unreachable(
               "scc_nocompare optimization is only implemented for p_cbranch and s_cselect");
      }

      /* Use the SCC def from the original instruction, not the comparison */
      ctx.uses[instr->operands[scc_op_idx].tempId()]--;
      instr->operands[scc_op_idx] = wr_instr->operands[0];
   }
}

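/* Whether the instruction is a parallelcopy that reads SCC, i.e. saves it into another register. */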
static bool
is_scc_copy(const Instruction* instr)
{
   return instr->opcode == aco_opcode::p_parallelcopy && instr->operands.size() == 1 &&
          instr->operands[0].isTemp() && instr->operands[0].physReg().reg() == scc;
}

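/* When a parallelcopy saves SCC into an SGPR, remember which instruction produced
 * the SCC value: its index within the current block is stashed in the copy's
 * pass_flags so that try_eliminate_scc_copy can find the producer later.
 */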
void
save_scc_copy_producer(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   if (!is_scc_copy(instr.get()))
      return;

   Idx wr_idx = last_writer_idx(ctx, instr->operands[0]);
   if (wr_idx.found() && wr_idx.block == ctx.current_block->index)
      instr->pass_flags = wr_idx.instr;
   else
      instr->pass_flags = UINT32_MAX;
}

void
try_eliminate_scc_copy(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   /* Try to eliminate an SCC copy by duplicating the instruction that produced the SCC. */

   if (instr->opcode != aco_opcode::p_parallelcopy || instr->definitions.size() != 1 ||
       instr->definitions[0].physReg().reg() != scc)
      return;

   /* Find the instruction that copied SCC into an SGPR. */
   Idx wr_idx = last_writer_idx(ctx, instr->operands[0]);
   if (!wr_idx.found())
      return;

   const Instruction* wr_instr = ctx.get(wr_idx);
   if (!is_scc_copy(wr_instr) || wr_instr->pass_flags == UINT32_MAX)
      return;

   Idx producer_idx = {wr_idx.block, wr_instr->pass_flags};
   Instruction* producer_instr = ctx.get(producer_idx);

   if (!producer_instr || !producer_instr->isSALU())
      return;

   /* Verify that the operands of the producer instruction haven't been overwritten. */
   for (const Operand& op : producer_instr->operands) {
      if (is_overwritten_since(ctx, op, producer_idx, true))
         return;
   }

   /* Verify that the definitions (except SCC) of the producer haven't been overwritten. */
   for (const Definition& def : producer_instr->definitions) {
      if (def.physReg().reg() == scc)
         continue;
      if (is_overwritten_since(ctx, def, producer_idx))
         return;
   }

   /* Duplicate the original producer of the SCC */
   Definition scc_def = instr->definitions[0];
   instr.reset(create_instruction(producer_instr->opcode, producer_instr->format,
                                  producer_instr->operands.size(),
                                  producer_instr->definitions.size()));
   instr->salu().imm = producer_instr->salu().imm;

   /* The copy is no longer needed. */
   if (--ctx.uses[wr_instr->definitions[0].tempId()] == 0)
      ctx.uses[wr_instr->operands[0].tempId()]--;

   /* Copy the operands of the original producer. */
   for (unsigned i = 0; i < producer_instr->operands.size(); ++i) {
      instr->operands[i] = producer_instr->operands[i];
      if (producer_instr->operands[i].isTemp() && !is_dead(ctx.uses, producer_instr))
         ctx.uses[producer_instr->operands[i].tempId()]++;
   }

   /* Copy the definitions of the original producer,
    * but mark them as non-temp to keep SSA quasi-intact.
    */
   for (unsigned i = 0; i < producer_instr->definitions.size(); ++i)
      instr->definitions[i] = Definition(producer_instr->definitions[i].physReg(),
                                         producer_instr->definitions[i].regClass());
   instr->definitions.back() = scc_def; /* Keep temporary ID. */
}

void
try_combine_dpp(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   /* We are looking for the following pattern:
    *
    * v_mov_dpp vA, vB, ...      ; move instruction with DPP
    * v_xxx vC, vA, ...          ; current instr that uses the result from the move
    *
    * If possible, the above is optimized into:
    *
    * v_xxx_dpp vC, vB, ...      ; current instr modified to use DPP directly
    *
    */

   if (!instr->isVALU() || instr->isDPP())
      return;

   for (unsigned i = 0; i < instr->operands.size(); i++) {
      Idx op_instr_idx = last_writer_idx(ctx, instr->operands[i]);
      if (!op_instr_idx.found())
         continue;

      /* is_overwritten_since only considers active lanes when the register could possibly
       * have been overwritten from inactive lanes. Restrict this optimization to at most
       * one block so that there is no possibility for clobbered inactive lanes.
       */
      if (ctx.current_block->index - op_instr_idx.block > 1)
         continue;

      const Instruction* mov = ctx.get(op_instr_idx);
      if (mov->opcode != aco_opcode::v_mov_b32 || !mov->isDPP())
         continue;

      /* If we aren't going to remove the v_mov_b32, we have to ensure that it doesn't overwrite
       * its own operand before we use it.
       */
      if (mov->definitions[0].physReg() == mov->operands[0].physReg() &&
          (!mov->definitions[0].tempId() || ctx.uses[mov->definitions[0].tempId()] > 1))
         continue;

      /* Don't propagate DPP if the source register is overwritten since the move. */
      if (is_overwritten_since(ctx, mov->operands[0], op_instr_idx))
         continue;

      bool dpp8 = mov->isDPP8();

      /* Fetch-inactive means exec is ignored, which allows us to combine across exec changes. */
      if (!(dpp8 ? mov->dpp8().fetch_inactive : mov->dpp16().fetch_inactive) &&
          is_overwritten_since(ctx, Operand(exec, ctx.program->lane_mask), op_instr_idx))
         continue;

      /* We won't eliminate the DPP mov if the operand is used twice */
      bool op_used_twice = false;
      for (unsigned j = 0; j < instr->operands.size(); j++)
         op_used_twice |= i != j && instr->operands[i] == instr->operands[j];
      if (op_used_twice)
         continue;

      bool input_mods = can_use_input_modifiers(ctx.program->gfx_level, instr->opcode, i) &&
                        get_operand_size(instr, i) == 32;
      bool mov_uses_mods = mov->valu().neg[0] || mov->valu().abs[0];
      if (((dpp8 && ctx.program->gfx_level < GFX11) || !input_mods) && mov_uses_mods)
         continue;

      if (i != 0) {
         if (!can_swap_operands(instr, &instr->opcode, 0, i))
            continue;
         instr->valu().swapOperands(0, i);
      }

      if (!can_use_DPP(ctx.program->gfx_level, instr, dpp8))
         continue;

      if (!dpp8) /* anything else doesn't make sense in SSA */
         assert(mov->dpp16().row_mask == 0xf && mov->dpp16().bank_mask == 0xf);

      if (--ctx.uses[mov->definitions[0].tempId()])
         ctx.uses[mov->operands[0].tempId()]++;

      convert_to_DPP(ctx.program->gfx_level, instr, dpp8);

      instr->operands[0] = mov->operands[0];

      if (dpp8) {
         DPP8_instruction* dpp = &instr->dpp8();
         dpp->lane_sel = mov->dpp8().lane_sel;
         dpp->fetch_inactive = mov->dpp8().fetch_inactive;
         if (mov_uses_mods)
            instr->format = asVOP3(instr->format);
      } else {
         DPP16_instruction* dpp = &instr->dpp16();
         dpp->dpp_ctrl = mov->dpp16().dpp_ctrl;
         dpp->bound_ctrl = true;
         dpp->fetch_inactive = mov->dpp16().fetch_inactive;
      }
      instr->valu().neg[0] ^= mov->valu().neg[0] && !instr->valu().abs[0];
      instr->valu().abs[0] |= mov->valu().abs[0];
      return;
   }
}

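/* Number of operands that are encoded as real ALU sources in the instruction.
 * Used by try_reassign_split_vector to decide which operands may be renamed;
 * e.g. v_writelane only counts 2 because it also reads its destination as SRC2.
 */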
unsigned
num_encoded_alu_operands(const aco_ptr<Instruction>& instr)
{
   if (instr->isSALU()) {
      if (instr->isSOP2() || instr->isSOPC())
         return 2;
      else if (instr->isSOP1())
         return 1;

      return 0;
   }

   if (instr->isVALU()) {
      if (instr->isVOP1())
         return 1;
      else if (instr->isVOPC() || instr->isVOP2())
         return 2;
      else if (instr->opcode == aco_opcode::v_writelane_b32_e64 ||
               instr->opcode == aco_opcode::v_writelane_b32)
         return 2; /* potentially VOP3, but reads VDST as SRC2 */
      else if (instr->isVOP3() || instr->isVOP3P() || instr->isVINTERP_INREG())
         return instr->operands.size();
   }

   return 0;
}

void
try_reassign_split_vector(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   /* Any unused split_vector definition can always use the same register
    * as the operand. This avoids creating unnecessary copies.
    */
   if (instr->opcode == aco_opcode::p_split_vector) {
      Operand& op = instr->operands[0];
      if (!op.isTemp() || op.isKill())
         return;

      PhysReg reg = op.physReg();
      for (Definition& def : instr->definitions) {
         if (def.getTemp().type() == op.getTemp().type() && def.isKill())
            def.setFixed(reg);

         reg = reg.advance(def.bytes());
      }

      return;
   }

   /* We are looking for the following pattern:
    *
    * sA, sB = p_split_vector s[X:Y]
    * ... X and Y not overwritten here ...
    * use sA or sB <--- current instruction
    *
    * If possible, we propagate the registers from the p_split_vector
    * operand into the current instruction and the above is optimized into:
    *
    * use sX or sY
    *
    * In doing so, we might violate register assignment rules.
    * This optimization exists because it's too difficult to solve it
    * in RA, and should be removed once this is solved in RA.
    */

   if (!instr->isVALU() && !instr->isSALU())
      return;

   for (unsigned i = 0; i < num_encoded_alu_operands(instr); i++) {
      /* Find the instruction that writes the current operand. */
      const Operand& op = instr->operands[i];
      Idx op_instr_idx = last_writer_idx(ctx, op);
      if (!op_instr_idx.found())
         continue;

      /* Check if the operand is written by p_split_vector. */
      Instruction* split_vec = ctx.get(op_instr_idx);
      if (split_vec->opcode != aco_opcode::p_split_vector &&
          split_vec->opcode != aco_opcode::p_extract_vector)
         continue;

      Operand& split_op = split_vec->operands[0];

      /* Don't do anything if the p_split_vector operand is not a temporary
       * or is killed by the p_split_vector.
       * In this case the definitions likely already reuse the same registers as the operand.
       */
      if (!split_op.isTemp() || split_op.isKill())
         continue;

      /* Only propagate operands of the same type */
      if (split_op.getTemp().type() != op.getTemp().type())
         continue;

      /* Check if the p_split_vector operand's registers are overwritten. */
      if (is_overwritten_since(ctx, split_op, op_instr_idx))
         continue;

      PhysReg reg = split_op.physReg();
      if (split_vec->opcode == aco_opcode::p_extract_vector) {
         reg =
            reg.advance(split_vec->definitions[0].bytes() * split_vec->operands[1].constantValue());
      }
      for (Definition& def : split_vec->definitions) {
         if (def.getTemp() != op.getTemp()) {
            reg = reg.advance(def.bytes());
            continue;
         }

         /* Don't propagate misaligned SGPRs.
          * Note: No ALU instruction can take a variable larger than 64 bits.
          */
         if (op.regClass() == s2 && reg.reg() % 2 != 0)
            break;

         /* Subdword operands might need updates to SDWA/opsel,
          * but we only track full register writes at the moment.
          */
         assert(op.physReg().byte() == reg.byte());

         /* If there is only one use (left), recolor the split_vector definition */
         if (ctx.uses[op.tempId()] == 1)
            def.setFixed(reg);
         else
            ctx.uses[op.tempId()]--;

         /* Use the p_split_vector operand register directly.
          *
          * Note: this might violate register assignment rules to some extent
          *       in case the definition does not get recolored, eventually.
          */
         instr->operands[i].setFixed(reg);
         break;
      }
   }
}

void
try_convert_fma_to_vop2(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   /* We convert v_fma_f32 with an inline constant to fmamk/fmaak.
    * This is only beneficial if it allows more VOPD.
    */
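   /* For example (with 1.0 as the inline constant):
    *    v0 = fma(v1, v2, 1.0)  becomes  v_fmaak_f32 (v0 = v1 * v2 + literal)
    *    v0 = fma(1.0, v1, v2)  becomes  v_fmamk_f32 (v0 = v1 * literal + v2)
    */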
   if (ctx.program->gfx_level < GFX11 || ctx.program->wave_size != 32 ||
       instr->opcode != aco_opcode::v_fma_f32 || instr->usesModifiers())
      return;

   int constant_idx = -1;
   int vgpr_idx = -1;
   for (int i = 0; i < 3; i++) {
      const Operand& op = instr->operands[i];
      if (op.isConstant() && !op.isLiteral())
         constant_idx = i;
      else if (op.isOfType(RegType::vgpr))
         vgpr_idx = i;
      else
         return;
   }

   if (constant_idx < 0 || vgpr_idx < 0)
      return;

   std::swap(instr->operands[constant_idx], instr->operands[2]);
   if (constant_idx == 0 || vgpr_idx == 0)
      std::swap(instr->operands[0], instr->operands[1]);
   instr->operands[2] = Operand::literal32(instr->operands[2].constantValue());
   instr->opcode = constant_idx == 2 ? aco_opcode::v_fmaak_f32 : aco_opcode::v_fmamk_f32;
   instr->format = Format::VOP2;
}

void
process_instruction(pr_opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
   /* Don't try to optimize instructions which are already dead. */
   if (!instr || is_dead(ctx.uses, instr.get())) {
      instr.reset();
      ctx.current_instr_idx++;
      return;
   }

   try_apply_branch_vcc(ctx, instr);

   try_optimize_scc_nocompare(ctx, instr);

   try_combine_dpp(ctx, instr);

   try_reassign_split_vector(ctx, instr);

   try_convert_fma_to_vop2(ctx, instr);

   try_eliminate_scc_copy(ctx, instr);

   save_scc_copy_producer(ctx, instr);

   save_reg_writes(ctx, instr);

   ctx.current_instr_idx++;
}

} // namespace

void
optimize_postRA(Program* program)
{
   pr_opt_ctx ctx(program);

   /* Forward pass
    * Goes through each instruction exactly once, and can transform
    * instructions or adjust the use counts of temps.
    */
   for (auto& block : program->blocks) {
      ctx.reset_block(&block);

      for (aco_ptr<Instruction>& instr : block.instructions)
         process_instruction(ctx, instr);

      /* SCC might get overwritten by copies or swaps from parallelcopies
       * inserted by SSA-elimination for linear phis.
       */
      if (!block.scc_live_out)
         ctx.instr_idx_by_regs[block.index][scc] = overwritten_unknown_instr;
   }

   /* Cleanup pass
    * Gets rid of instructions which are manually deleted or
    * no longer have any uses.
    */
   for (auto& block : program->blocks) {
      std::vector<aco_ptr<Instruction>> instructions;
      instructions.reserve(block.instructions.size());

      for (aco_ptr<Instruction>& instr : block.instructions) {
         if (!instr || is_dead(ctx.uses, instr.get()))
            continue;

         instructions.emplace_back(std::move(instr));
      }

      block.instructions = std::move(instructions);
   }
}

} // namespace aco