xref: /aosp_15_r20/external/mesa3d/src/gallium/drivers/svga/svga_state_vs.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright (c) 2008-2024 Broadcom. All Rights Reserved.
3  * The term “Broadcom” refers to Broadcom Inc.
4  * and/or its subsidiaries.
5  * SPDX-License-Identifier: MIT
6  */
7 
8 #include "util/u_inlines.h"
9 #include "pipe/p_defines.h"
10 #include "util/u_math.h"
11 #include "util/u_memory.h"
12 #include "util/u_bitmask.h"
13 #include "translate/translate.h"
14 #include "tgsi/tgsi_ureg.h"
15 
16 #include "svga_context.h"
17 #include "svga_state.h"
18 #include "svga_cmd.h"
19 #include "svga_shader.h"
20 #include "svga_tgsi.h"
21 
22 #include "svga_hw_reg.h"
23 
24 
25 /**
26  * If we fail to compile a vertex shader we'll use a dummy/fallback shader
27  * that simply emits a (0,0,0,1) vertex position.
28  */
29 static const struct tgsi_token *
get_dummy_vertex_shader(void)30 get_dummy_vertex_shader(void)
31 {
32    static const float zero[4] = { 0.0, 0.0, 0.0, 1.0 };
33    struct ureg_program *ureg;
34    const struct tgsi_token *tokens;
35    struct ureg_src src;
36    struct ureg_dst dst;
37 
38    ureg = ureg_create(PIPE_SHADER_VERTEX);
39    if (!ureg)
40       return NULL;
41 
42    dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);
43    src = ureg_DECL_immediate(ureg, zero, 4);
44    ureg_MOV(ureg, dst, src);
45    ureg_END(ureg);
46 
47    tokens = ureg_get_tokens(ureg, NULL);
48 
49    ureg_destroy(ureg);
50 
51    return tokens;
52 }
53 
54 
55 /**
56  * Replace the given shader's instruction with a simple / dummy shader.
57  * We use this when normal shader translation fails.
58  */
59 struct svga_shader_variant *
svga_get_compiled_dummy_vertex_shader(struct svga_context * svga,struct svga_shader * shader,const struct svga_compile_key * key)60 svga_get_compiled_dummy_vertex_shader(struct svga_context *svga,
61                                       struct svga_shader *shader,
62                                       const struct svga_compile_key *key)
63 {
64    struct svga_vertex_shader *vs = (struct svga_vertex_shader *)shader;
65    const struct tgsi_token *dummy = get_dummy_vertex_shader();
66    struct svga_shader_variant *variant;
67 
68    if (!dummy) {
69       return NULL;
70    }
71 
72    FREE((void *) vs->base.tokens);
73    vs->base.tokens = dummy;
74 
75    svga_tgsi_scan_shader(&vs->base);
76 
77    variant = svga_tgsi_compile_shader(svga, shader, key);
78    return variant;
79 }
80 
81 
82 /* SVGA_NEW_PRESCALE, SVGA_NEW_RAST, SVGA_NEW_FS
83  */
84 static void
make_vs_key(struct svga_context * svga,struct svga_compile_key * key)85 make_vs_key(struct svga_context *svga, struct svga_compile_key *key)
86 {
87    struct svga_vertex_shader *vs = svga->curr.vs;
88 
89    memset(key, 0, sizeof *key);
90 
91    if (svga->state.sw.need_swtnl && svga_have_vgpu10(svga)) {
92       /* Set both of these flags, to match compile_passthrough_vs() */
93       key->vs.passthrough = 1;
94       key->vs.undo_viewport = 1;
95       return;
96    }
97 
98    if (svga_have_vgpu10(svga)) {
99       key->vs.need_vertex_id_bias = 1;
100    }
101 
102    /* SVGA_NEW_PRESCALE */
103    key->vs.need_prescale = svga->state.hw_clear.prescale[0].enabled &&
104                            (svga->curr.tes == NULL) &&
105                            (svga->curr.gs == NULL);
106 
107    /* SVGA_NEW_RAST */
108    key->vs.allow_psiz = svga->curr.rast->templ.point_size_per_vertex;
109 
110    /* SVGA_NEW_FS */
111    key->vs.fs_generic_inputs = svga->curr.fs->base.info.generic_inputs_mask;
112 
113    svga_remap_generics(key->vs.fs_generic_inputs, key->generic_remap_table);
114 
115    /* SVGA_NEW_VELEMENT */
116    key->vs.adjust_attrib_range = svga->curr.velems->adjust_attrib_range;
117    key->vs.adjust_attrib_w_1 = svga->curr.velems->adjust_attrib_w_1;
118    key->vs.attrib_is_pure_int = svga->curr.velems->attrib_is_pure_int;
119    key->vs.adjust_attrib_itof = svga->curr.velems->adjust_attrib_itof;
120    key->vs.adjust_attrib_utof = svga->curr.velems->adjust_attrib_utof;
121    key->vs.attrib_is_bgra = svga->curr.velems->attrib_is_bgra;
122    key->vs.attrib_puint_to_snorm = svga->curr.velems->attrib_puint_to_snorm;
123    key->vs.attrib_puint_to_uscaled = svga->curr.velems->attrib_puint_to_uscaled;
124    key->vs.attrib_puint_to_sscaled = svga->curr.velems->attrib_puint_to_sscaled;
125 
126    /* SVGA_NEW_TEXTURE_BINDING | SVGA_NEW_SAMPLER */
127    svga_init_shader_key_common(svga, PIPE_SHADER_VERTEX, &vs->base, key);
128 
129    /* SVGA_NEW_RAST */
130    key->clip_plane_enable = svga->curr.rast->templ.clip_plane_enable;
131 
132    /* Determine if this shader is the last shader in the vertex
133     * processing stage.
134     */
135    key->last_vertex_stage = !(svga->curr.gs ||
136                               svga->curr.tcs || svga->curr.tes);
137 }
138 
139 
140 /**
141  * svga_reemit_vs_bindings - Reemit the vertex shader bindings
142  */
143 enum pipe_error
svga_reemit_vs_bindings(struct svga_context * svga)144 svga_reemit_vs_bindings(struct svga_context *svga)
145 {
146    enum pipe_error ret;
147    struct svga_winsys_gb_shader *gbshader = NULL;
148    SVGA3dShaderId shaderId = SVGA3D_INVALID_ID;
149 
150    assert(svga->rebind.flags.vs);
151    assert(svga_have_gb_objects(svga));
152 
153    if (svga->state.hw_draw.vs) {
154       gbshader = svga->state.hw_draw.vs->gb_shader;
155       shaderId = svga->state.hw_draw.vs->id;
156    }
157 
158    if (!svga_need_to_rebind_resources(svga)) {
159       ret =  svga->swc->resource_rebind(svga->swc, NULL, gbshader,
160                                         SVGA_RELOC_READ);
161    }
162    else {
163       if (svga_have_vgpu10(svga))
164          ret = SVGA3D_vgpu10_SetShader(svga->swc, SVGA3D_SHADERTYPE_VS,
165                                        gbshader, shaderId);
166       else
167          ret = SVGA3D_SetGBShader(svga->swc, SVGA3D_SHADERTYPE_VS, gbshader);
168    }
169 
170    if (ret != PIPE_OK)
171       return ret;
172 
173    svga->rebind.flags.vs = false;
174    return PIPE_OK;
175 }
176 
177 
178 /**
179  * The current vertex shader is already executed by the 'draw'
180  * module, so we just need to generate a simple vertex shader
181  * to pass through all those VS outputs that will
182  * be consumed by the fragment shader.
183  * Used when we employ the 'draw' module.
184  */
185 static enum pipe_error
compile_passthrough_vs(struct svga_context * svga,struct svga_vertex_shader * vs,struct svga_fragment_shader * fs,struct svga_shader_variant ** out_variant)186 compile_passthrough_vs(struct svga_context *svga,
187                        struct svga_vertex_shader *vs,
188                        struct svga_fragment_shader *fs,
189                        struct svga_shader_variant **out_variant)
190 {
191    struct svga_shader_variant *variant = NULL;
192    unsigned num_inputs;
193    unsigned i;
194    unsigned num_elements;
195    struct svga_vertex_shader new_vs;
196    struct ureg_src src[PIPE_MAX_SHADER_INPUTS];
197    struct ureg_dst dst[PIPE_MAX_SHADER_OUTPUTS];
198    struct ureg_program *ureg;
199    struct svga_compile_key key;
200    enum pipe_error ret;
201 
202    assert(svga_have_vgpu10(svga));
203    assert(fs);
204 
205    num_inputs = fs->base.tgsi_info.num_inputs;
206 
207    ureg = ureg_create(PIPE_SHADER_VERTEX);
208    if (!ureg)
209       return PIPE_ERROR_OUT_OF_MEMORY;
210 
211    /* draw will always add position */
212    dst[0] = ureg_DECL_output(ureg, TGSI_SEMANTIC_POSITION, 0);
213    src[0] = ureg_DECL_vs_input(ureg, 0);
214    num_elements = 1;
215 
216    /**
217     * swtnl backend redefines the input layout based on the
218     * fragment shader's inputs. So we only need to passthrough
219     * those inputs that will be consumed by the fragment shader.
220     * Note: DX10 requires the number of vertex elements
221     * specified in the input layout to be no less than the
222     * number of inputs to the vertex shader.
223     */
224    for (i = 0; i < num_inputs; i++) {
225       switch (fs->base.tgsi_info.input_semantic_name[i]) {
226       case TGSI_SEMANTIC_COLOR:
227       case TGSI_SEMANTIC_GENERIC:
228       case TGSI_SEMANTIC_FOG:
229          dst[num_elements] = ureg_DECL_output(ureg,
230                                 fs->base.tgsi_info.input_semantic_name[i],
231                                 fs->base.tgsi_info.input_semantic_index[i]);
232          src[num_elements] = ureg_DECL_vs_input(ureg, num_elements);
233          num_elements++;
234          break;
235       default:
236          break;
237       }
238    }
239 
240    for (i = 0; i < num_elements; i++) {
241       ureg_MOV(ureg, dst[i], src[i]);
242    }
243 
244    ureg_END(ureg);
245 
246    memset(&new_vs, 0, sizeof(new_vs));
247    new_vs.base.tokens = ureg_get_tokens(ureg, NULL);
248    svga_tgsi_scan_shader(&new_vs.base);
249 
250    memset(&key, 0, sizeof(key));
251    key.vs.undo_viewport = 1;
252 
253    ret = svga_compile_shader(svga, &new_vs.base, &key, &variant);
254    if (ret != PIPE_OK)
255       return ret;
256 
257    ureg_free_tokens(new_vs.base.tokens);
258    ureg_destroy(ureg);
259 
260    /* Overwrite the variant key to indicate it's a pass-through VS */
261    memset(&variant->key, 0, sizeof(variant->key));
262    variant->key.vs.passthrough = 1;
263    variant->key.vs.undo_viewport = 1;
264 
265    *out_variant = variant;
266 
267    return PIPE_OK;
268 }
269 
270 
/**
 * Emit-time handler for the vertex shader: configure stream output,
 * look up or compile the VS variant matching the current state key,
 * and bind it if it differs from the currently bound variant.
 * Invoked through the svga_hw_vs tracked-state atom.
 *
 * NOTE(review): 'dirty' appears unused here; presumably the tracked-state
 * framework passes it to every handler — confirm against svga_state.h.
 */
static enum pipe_error
emit_hw_vs(struct svga_context *svga, uint64_t dirty)
{
   struct svga_shader_variant *variant;
   struct svga_vertex_shader *vs = svga->curr.vs;
   struct svga_fragment_shader *fs = svga->curr.fs;
   enum pipe_error ret = PIPE_OK;
   struct svga_compile_key key;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_EMITVS);

   /* If there is an active geometry shader, and it has stream output
    * defined, then we will skip the stream output from the vertex shader
    */
   if (!svga_have_gs_streamout(svga)) {
      /* No GS stream out */
      if (svga_have_vs_streamout(svga)) {
         /* Set VS stream out */
         ret = svga_set_stream_output(svga, vs->base.stream_output);
      }
      else {
         /* turn off stream out */
         ret = svga_set_stream_output(svga, NULL);
      }
      if (ret != PIPE_OK) {
         goto done;
      }
   }

   /* SVGA_NEW_NEED_SWTNL */
   if (svga->state.sw.need_swtnl && !svga_have_vgpu10(svga)) {
      /* No vertex shader is needed */
      variant = NULL;
   }
   else {
      make_vs_key(svga, &key);

      /* See if we already have a VS variant that matches the key */
      variant = svga_search_shader_key(&vs->base, &key);

      if (!variant) {
         /* Create VS variant now */
         if (key.vs.passthrough) {
            /* vgpu10 swtnl path: 'draw' already ran the real VS */
            ret = compile_passthrough_vs(svga, vs, fs, &variant);
         }
         else {
            ret = svga_compile_shader(svga, &vs->base, &key, &variant);
         }
         if (ret != PIPE_OK)
            goto done;
      }
   }

   /* Only touch the device binding when the variant actually changed */
   if (variant != svga->state.hw_draw.vs) {
      /* Bind the new variant */
      if (variant) {
         ret = svga_set_shader(svga, SVGA3D_SHADERTYPE_VS, variant);
         if (ret != PIPE_OK)
            goto done;
         /* Freshly bound, so no rebind is pending anymore */
         svga->rebind.flags.vs = false;
      }

      svga->dirty |= SVGA_NEW_VS_VARIANT;
      svga->state.hw_draw.vs = variant;
   }

done:
   SVGA_STATS_TIME_POP(svga_sws(svga));
   return ret;
}
341 
/**
 * Tracked-state atom for the hardware-TNL vertex shader: emit_hw_vs()
 * is run whenever any of the listed state flags is dirty.
 */
struct svga_tracked_state svga_hw_vs =
{
   "vertex shader (hwtnl)",
   (SVGA_NEW_VS |
    SVGA_NEW_FS |
    SVGA_NEW_TEXTURE_BINDING |
    SVGA_NEW_SAMPLER |
    SVGA_NEW_RAST |
    SVGA_NEW_PRESCALE |
    SVGA_NEW_VELEMENT |
    SVGA_NEW_NEED_SWTNL |
    SVGA_NEW_VS_RAW_BUFFER),
   emit_hw_vs
};
356