xref: /aosp_15_r20/external/mesa3d/src/imagination/rogue/passes/rogue_lower_pseudo_ops.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2022 Imagination Technologies Ltd.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a copy
5  * of this software and associated documentation files (the "Software"), to deal
6  * in the Software without restriction, including without limitation the rights
7  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8  * copies of the Software, and to permit persons to whom the Software is
9  * furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  */
23 
24 #include "rogue.h"
25 #include "rogue_builder.h"
26 #include "util/macros.h"
27 
28 #include <stdbool.h>
29 
30 /**
31  * \file rogue_lower_pseudo_ops.c
32  *
33  * \brief Contains the rogue_lower_pseudo_ops pass.
34  */
35 
/* Lowers a pseudo fabs into a bypass move (mbyp) with the absolute
 * source modifier set on source 0. Always succeeds.
 */
static inline bool rogue_lower_FABS(rogue_builder *b, rogue_alu_instr *fabs)
{
   rogue_alu_instr *lowered =
      rogue_MBYP(b, fabs->dst[0].ref, fabs->src[0].ref);

   rogue_set_alu_src_mod(lowered, 0, ROGUE_ALU_SRC_MOD_ABS);
   rogue_merge_instr_comment(&lowered->instr, &fabs->instr, "fabs");

   /* The pseudo-op has been fully replaced; drop it. */
   rogue_instr_delete(&fabs->instr);

   return true;
}
45 
/* Lowers a pseudo fneg into a bypass move (mbyp) with the negate
 * source modifier set on source 0. Always succeeds.
 */
static inline bool rogue_lower_FNEG(rogue_builder *b, rogue_alu_instr *fneg)
{
   rogue_alu_instr *lowered =
      rogue_MBYP(b, fneg->dst[0].ref, fneg->src[0].ref);

   rogue_set_alu_src_mod(lowered, 0, ROGUE_ALU_SRC_MOD_NEG);
   rogue_merge_instr_comment(&lowered->instr, &fneg->instr, "fneg");

   /* The pseudo-op has been fully replaced; drop it. */
   rogue_instr_delete(&fneg->instr);

   return true;
}
55 
/* Lowers a pseudo fnabs (-|x|) into a bypass move (mbyp) with both the
 * absolute and negate source modifiers set on source 0; abs is applied
 * before neg. Always succeeds.
 */
static inline bool rogue_lower_FNABS(rogue_builder *b, rogue_alu_instr *fnabs)
{
   rogue_alu_instr *lowered =
      rogue_MBYP(b, fnabs->dst[0].ref, fnabs->src[0].ref);

   rogue_set_alu_src_mod(lowered, 0, ROGUE_ALU_SRC_MOD_ABS);
   rogue_set_alu_src_mod(lowered, 0, ROGUE_ALU_SRC_MOD_NEG);
   rogue_merge_instr_comment(&lowered->instr, &fnabs->instr, "fnabs");

   /* The pseudo-op has been fully replaced; drop it. */
   rogue_instr_delete(&fnabs->instr);

   return true;
}
66 
/* Lowers a pseudo mov into the appropriate real instruction, depending
 * on the destination and source operands:
 *   - vertex output register destination -> uvsw.write
 *   - special register source            -> movc with all dest enables set
 *   - immediate source                   -> bitwise byp0
 *   - anything else                      -> plain mbyp
 * Always succeeds.
 */
static inline bool rogue_lower_MOV(rogue_builder *b, rogue_alu_instr *mov)
{
   rogue_instr *lowered;

   const bool dst_is_vtxout =
      rogue_ref_is_reg(&mov->dst[0].ref) &&
      mov->dst[0].ref.reg->class == ROGUE_REG_CLASS_VTXOUT;

   if (dst_is_vtxout) {
      /* Vertex output registers can only be written via uvsw.write. */
      lowered = &rogue_UVSW_WRITE(b, mov->dst[0].ref, mov->src[0].ref)->instr;
   } else if (rogue_ref_is_special_reg(&mov->src[0].ref)) {
      /* Loads from special registers go through a movc with every
       * destination enable modifier (E0-E3) set.
       */
      rogue_alu_instr *movc = rogue_MOVC(b,
                                         mov->dst[0].ref,
                                         rogue_ref_io(ROGUE_IO_NONE),
                                         rogue_ref_io(ROGUE_IO_NONE),
                                         mov->src[0].ref,
                                         rogue_ref_io(ROGUE_IO_NONE));
      rogue_set_alu_dst_mod(movc, 0, ROGUE_ALU_DST_MOD_E0);
      rogue_set_alu_dst_mod(movc, 0, ROGUE_ALU_DST_MOD_E1);
      rogue_set_alu_dst_mod(movc, 0, ROGUE_ALU_DST_MOD_E2);
      rogue_set_alu_dst_mod(movc, 0, ROGUE_ALU_DST_MOD_E3);

      lowered = &movc->instr;
   } else if (rogue_ref_is_imm(&mov->src[0].ref)) {
      /* Immediates that aren't special constants need a bitwise bypass. */
      lowered = &rogue_BYP0(b,
                            rogue_ref_io(ROGUE_IO_FT0),
                            mov->dst[0].ref,
                            rogue_ref_io(ROGUE_IO_S0),
                            rogue_ref_val(
                               rogue_ref_get_imm(&mov->src[0].ref)->imm.u32))
                    ->instr;
   } else {
      /* Register-to-register moves become a plain bypass. */
      lowered = &rogue_MBYP(b, mov->dst[0].ref, mov->src[0].ref)->instr;
   }

   rogue_merge_instr_comment(lowered, &mov->instr, "mov");
   rogue_instr_delete(&mov->instr);

   return true;
}
112 
/* Dispatches an ALU pseudo-op to its lowering helper.
 * Returns true if the instruction was lowered, false if it was left alone.
 */
static inline bool rogue_lower_alu_instr(rogue_builder *b, rogue_alu_instr *alu)
{
   switch (alu->op) {
   case ROGUE_ALU_OP_MOV:
      return rogue_lower_MOV(b, alu);

   case ROGUE_ALU_OP_FABS:
      return rogue_lower_FABS(b, alu);

   case ROGUE_ALU_OP_FNEG:
      return rogue_lower_FNEG(b, alu);

   case ROGUE_ALU_OP_FNABS:
      return rogue_lower_FNABS(b, alu);

   default:
      /* Not a pseudo-op we know how to lower. */
      return false;
   }
}
134 
/* Lowers a pseudo end into a nop carrying the end op modifier.
 * Always succeeds.
 */
static inline bool rogue_lower_END(rogue_builder *b, rogue_ctrl_instr *end)
{
   rogue_ctrl_instr *lowered = rogue_NOP(b);

   rogue_set_ctrl_op_mod(lowered, ROGUE_CTRL_OP_MOD_END);
   rogue_merge_instr_comment(&lowered->instr, &end->instr, "end");

   /* The pseudo-op has been fully replaced; drop it. */
   rogue_instr_delete(&end->instr);

   return true;
}
144 
/* Dispatches a control pseudo-op to its lowering helper.
 * Returns true if the instruction was lowered, false if it was left alone.
 */
static inline bool rogue_lower_ctrl_instr(rogue_builder *b,
                                          rogue_ctrl_instr *ctrl)
{
   if (ctrl->op == ROGUE_CTRL_OP_END)
      return rogue_lower_END(b, ctrl);

   /* Not a pseudo-op we know how to lower. */
   return false;
}
158 
159 /* TODO: This should only really be called after a distribute_src_mods pass (to
160  * come later). */
161 PUBLIC
rogue_lower_pseudo_ops(rogue_shader * shader)162 bool rogue_lower_pseudo_ops(rogue_shader *shader)
163 {
164    if (shader->is_grouped)
165       return false;
166 
167    bool progress = false;
168 
169    rogue_builder b;
170    rogue_builder_init(&b, shader);
171 
172    rogue_foreach_instr_in_shader_safe (instr, shader) {
173       /* Skip real ops. */
174       if (rogue_instr_supported_phases(instr))
175          continue;
176 
177       b.cursor = rogue_cursor_before_instr(instr);
178       switch (instr->type) {
179       case ROGUE_INSTR_TYPE_ALU:
180          progress |= rogue_lower_alu_instr(&b, rogue_instr_as_alu(instr));
181          break;
182 
183       case ROGUE_INSTR_TYPE_CTRL:
184          progress |= rogue_lower_ctrl_instr(&b, rogue_instr_as_ctrl(instr));
185          break;
186 
187       default:
188          continue;
189       }
190    }
191 
192    return progress;
193 }
194