/*
 * Copyright © 2023 Imagination Technologies Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <assert.h>
#include <stdint.h>

#include "pvr_uscgen.h"
#include "rogue/rogue.h"
#include "rogue/rogue_builder.h"
#include "util/u_dynarray.h"

void pvr_uscgen_tq_frag(const struct pvr_tq_shader_properties *shader_props,
                        struct pvr_tq_frag_sh_reg_layout *sh_reg_layout,
                        unsigned *temps_used,
                        struct util_dynarray *binary)
{
   rogue_builder b;
   rogue_shader *shader = rogue_shader_create(NULL, MESA_SHADER_NONE);

   unsigned smp_coord_size = 2;
   unsigned smp_coord_idx = 0;
   rogue_regarray *smp_coords;

   unsigned channels = 0;
   unsigned output_idx = 1;
   rogue_regarray *outputs = NULL;

   unsigned image_state_size = 4;
   unsigned image_state_idx;
   rogue_regarray *image_state;

   unsigned smp_state_size = 4;
   unsigned smp_state_idx;
   rogue_regarray *smp_state;

   rogue_set_shader_name(shader, "TQ (fragment)");
   rogue_builder_init(&b, shader);
   rogue_push_block(&b);

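   /* SSA vector holding the 2D sample coordinates for the texture fetch. */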
   smp_coords =
      rogue_ssa_vec_regarray(b.shader, smp_coord_size, smp_coord_idx, 0);

   /* TODO: Remove these restrictions. */
   assert(shader_props->full_rate == false);
   assert(shader_props->pick_component == false);

   const struct pvr_tq_layer_properties *layer_props =
      &shader_props->layer_props;
   uint32_t loads;

   /* TODO: Remove these restrictions. */
   assert(layer_props->msaa == false);
   assert(layer_props->sample_count == 1U);
   assert(layer_props->resolve_op == PVR_RESOLVE_BLEND);
   assert(layer_props->pbe_format == PVR_TRANSFER_PBE_PIXEL_SRC_RAW64 ||
          layer_props->pbe_format == PVR_TRANSFER_PBE_PIXEL_SRC_RAW128);
   assert(layer_props->sample == false);
   assert(layer_props->layer_floats == PVR_INT_COORD_SET_FLOATS_0);
   assert(layer_props->byte_unwind == 0);
   assert(layer_props->linear == false);

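   /* One iteration per load required by the PBE pixel format; each one sets
    * up the sample coordinates, either from iterated coefficients or from
    * the pixel (X,Y) special registers.
    */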
   loads = pvr_pbe_pixel_num_loads(layer_props->pbe_format);
   for (uint32_t load = 0; load < loads; ++load) {
      if (shader_props->iterated) {
         /* TODO: feed{back,forward} the coeff index to/from shader_info. */
         unsigned coeff_index = 0;
         rogue_regarray *coeffs =
            rogue_coeff_regarray(b.shader, smp_coord_size * 4, coeff_index);

         rogue_instr *instr = &rogue_FITR_PIXEL(&b,
                                                rogue_ref_regarray(smp_coords),
                                                rogue_ref_drc(0),
                                                rogue_ref_regarray(coeffs),
                                                rogue_ref_val(smp_coord_size))
                                  ->instr;
         rogue_add_instr_comment(instr, "load_iterated");
      } else {
         rogue_instr *instr;
         rogue_regarray *smp_coord_x =
            rogue_ssa_vec_regarray(b.shader, 1, smp_coord_idx, 0);
         rogue_regarray *smp_coord_y =
            rogue_ssa_vec_regarray(b.shader, 1, smp_coord_idx, 1);

         /* (X,Y).P, pixel (X,Y) coordinates, pixel mode. */
         rogue_reg *in_x = rogue_special_reg(b.shader, 97);
         rogue_reg *in_y = rogue_special_reg(b.shader, 100);

         instr =
            &rogue_MOV(&b, rogue_ref_regarray(smp_coord_x), rogue_ref_reg(in_x))
                ->instr;
         rogue_add_instr_comment(instr, "load_x");

         instr =
            &rogue_MOV(&b, rogue_ref_regarray(smp_coord_y), rogue_ref_reg(in_y))
                ->instr;
         rogue_add_instr_comment(instr, "load_y");
      }

      if (layer_props->msaa)
         unreachable("Unsupported layer property (MSAA).");
   }

   /* Source conversion: nothing to do for the raw formats. */
   switch (layer_props->pbe_format) {
   case PVR_TRANSFER_PBE_PIXEL_SRC_RAW64:
   case PVR_TRANSFER_PBE_PIXEL_SRC_RAW128:
      break;

   default:
      unreachable("Unsupported layer property (format).");
   }

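   /* Locate the image and sampler state words in the shared registers, as
    * described by the combined image/sampler register layout.
    */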
   /* TODO: Select the texture_regs index appropriately. */
   assert(sh_reg_layout->combined_image_samplers.count == 1);
   image_state_idx = sh_reg_layout->combined_image_samplers.offsets[0].image;
   image_state =
      rogue_shared_regarray(b.shader, image_state_size, image_state_idx);

   smp_state_idx = sh_reg_layout->combined_image_samplers.offsets[0].sampler;
   smp_state = rogue_shared_regarray(b.shader, smp_state_size, smp_state_idx);

   /* Pack/blend phase. */
   rogue_backend_instr *smp2d;

   switch (layer_props->pbe_format) {
   case PVR_TRANSFER_PBE_PIXEL_SRC_RAW64:
   case PVR_TRANSFER_PBE_PIXEL_SRC_RAW128: {
      switch (layer_props->pbe_format) {
      case PVR_TRANSFER_PBE_PIXEL_SRC_RAW64:
         channels = 2;
         break;

      case PVR_TRANSFER_PBE_PIXEL_SRC_RAW128:
         channels = 4;
         break;

      default:
         unreachable("Unsupported layer property (format).");
      }

      outputs = rogue_ssa_vec_regarray(b.shader, channels, output_idx, 0);

      smp2d = rogue_SMP2D(&b,
                          rogue_ref_regarray(outputs),
                          rogue_ref_drc(0),
                          rogue_ref_regarray(image_state),
                          rogue_ref_regarray(smp_coords),
                          rogue_ref_regarray(smp_state),
                          rogue_ref_io(ROGUE_IO_NONE),
                          rogue_ref_val(channels));
      rogue_set_backend_op_mod(smp2d, ROGUE_BACKEND_OP_MOD_SLCWRITEBACK);
      rogue_add_instr_comment(&smp2d->instr, "pack/blend");

      if (!shader_props->iterated)
         rogue_set_backend_op_mod(smp2d, ROGUE_BACKEND_OP_MOD_NNCOORDS);
      break;
   }

   default:
      unreachable("Unsupported layer property (format).");
   }

   assert(channels && outputs);

   /* Copy outputs. */
   for (unsigned u = 0; u < channels; ++u) {
      rogue_regarray *output_elem =
         rogue_ssa_vec_regarray(b.shader, 1, output_idx, u);
      rogue_reg *pixout_elem = rogue_pixout_reg(b.shader, u);
      rogue_MOV(&b,
                rogue_ref_reg(pixout_elem),
                rogue_ref_regarray(output_elem));
   }

   rogue_END(&b);

   rogue_shader_passes(shader);
   rogue_encode_shader(NULL, shader, binary);

   *temps_used = rogue_count_used_regs(shader, ROGUE_REG_CLASS_TEMP);

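   /* No compiler-allocated USC constants or extra shared registers are used. */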
   sh_reg_layout->compiler_out.usc_constants.count = 0;
   sh_reg_layout->compiler_out_total = 0;

   ralloc_free(shader);
}

void pvr_uscgen_tq_eot(unsigned rt_count,
                       const uint64_t *pbe_regs,
                       struct util_dynarray *binary)
{
   rogue_builder b;
   rogue_shader *shader = rogue_shader_create(NULL, MESA_SHADER_NONE);
   rogue_set_shader_name(shader, "TQ (EOT)");
   rogue_builder_init(&b, shader);
   rogue_push_block(&b);

   rogue_backend_instr *emitpix = NULL;
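   /* One EMITPIX per render target, each sourcing its pair of PBE state
    * words from the shared registers indexed by pbe_regs[].
    */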
   for (unsigned u = 0; u < rt_count; ++u) {
      if (u > 0)
         rogue_WOP(&b);

      rogue_reg *state_word_0 = rogue_shared_reg(shader, pbe_regs[u]);
      rogue_reg *state_word_1 = rogue_shared_reg(shader, pbe_regs[u] + 1);

      emitpix = rogue_EMITPIX(&b,
                              rogue_ref_reg(state_word_0),
                              rogue_ref_reg(state_word_1));
   }

   assert(emitpix);

   rogue_set_backend_op_mod(emitpix, ROGUE_BACKEND_OP_MOD_FREEP);
   rogue_END(&b);

   rogue_shader_passes(shader);
   rogue_encode_shader(NULL, shader, binary);

   ralloc_free(shader);
}
