1 /*
2 * Copyright (c) 2008-2024 Broadcom. All Rights Reserved.
3 * The term “Broadcom” refers to Broadcom Inc.
4 * and/or its subsidiaries.
5 * SPDX-License-Identifier: MIT
6 */
7
8 #include "util/u_inlines.h"
9 #include "pipe/p_defines.h"
10 #include "util/format/u_format.h"
11 #include "util/u_math.h"
12 #include "util/u_memory.h"
13 #include "util/u_bitmask.h"
14 #include "tgsi/tgsi_ureg.h"
15
16 #include "svga_context.h"
17 #include "svga_state.h"
18 #include "svga_cmd.h"
19 #include "svga_shader.h"
20 #include "svga_resource_texture.h"
21 #include "svga_tgsi.h"
22 #include "svga_format.h"
23
24 #include "svga_hw_reg.h"
25
26
27
28 /**
29 * If we fail to compile a fragment shader (because it uses too many
30 * registers, for example) we'll use a dummy/fallback shader that
31 * simply emits a constant color (red for debug, black for release).
32 * We hit this with the Unigine/Heaven demo when Shaders = High.
33 * With black, the demo still looks good.
34 */
35 static const struct tgsi_token *
get_dummy_fragment_shader(void)36 get_dummy_fragment_shader(void)
37 {
38 #if MESA_DEBUG
39 static const float color[4] = { 1.0, 0.0, 0.0, 0.0 }; /* red */
40 #else
41 static const float color[4] = { 0.0, 0.0, 0.0, 0.0 }; /* black */
42 #endif
43 struct ureg_program *ureg;
44 const struct tgsi_token *tokens;
45 struct ureg_src src;
46 struct ureg_dst dst;
47
48 ureg = ureg_create(PIPE_SHADER_FRAGMENT);
49 if (!ureg)
50 return NULL;
51
52 dst = ureg_DECL_output(ureg, TGSI_SEMANTIC_COLOR, 0);
53 src = ureg_DECL_immediate(ureg, color, 4);
54 ureg_MOV(ureg, dst, src);
55 ureg_END(ureg);
56
57 tokens = ureg_get_tokens(ureg, NULL);
58
59 ureg_destroy(ureg);
60
61 return tokens;
62 }
63
64
65 /**
66 * Replace the given shader's instruction with a simple constant-color
67 * shader. We use this when normal shader translation fails.
68 */
69 struct svga_shader_variant *
svga_get_compiled_dummy_fragment_shader(struct svga_context * svga,struct svga_shader * shader,const struct svga_compile_key * key)70 svga_get_compiled_dummy_fragment_shader(struct svga_context *svga,
71 struct svga_shader *shader,
72 const struct svga_compile_key *key)
73 {
74 struct svga_fragment_shader *fs = (struct svga_fragment_shader *)shader;
75 const struct tgsi_token *dummy = get_dummy_fragment_shader();
76 struct svga_shader_variant *variant;
77
78 if (!dummy) {
79 return NULL;
80 }
81
82 FREE((void *) fs->base.tokens);
83 fs->base.tokens = dummy;
84
85 svga_tgsi_scan_shader(&fs->base);
86 svga_remap_generics(fs->base.info.generic_inputs_mask,
87 fs->generic_remap_table);
88
89 variant = svga_tgsi_compile_shader(svga, shader, key);
90 return variant;
91 }
92
93
94 /* SVGA_NEW_TEXTURE_BINDING
95 * SVGA_NEW_RAST
96 * SVGA_NEW_NEED_SWTNL
97 * SVGA_NEW_SAMPLER
98 */
99 static enum pipe_error
make_fs_key(const struct svga_context * svga,struct svga_fragment_shader * fs,struct svga_compile_key * key)100 make_fs_key(const struct svga_context *svga,
101 struct svga_fragment_shader *fs,
102 struct svga_compile_key *key)
103 {
104 const enum pipe_shader_type shader = PIPE_SHADER_FRAGMENT;
105 unsigned i;
106
107 memset(key, 0, sizeof *key);
108
109 memcpy(key->generic_remap_table, fs->generic_remap_table,
110 sizeof(fs->generic_remap_table));
111
112 /* SVGA_NEW_GS, SVGA_NEW_VS
113 */
114 struct svga_geometry_shader *gs = svga->curr.gs;
115 struct svga_vertex_shader *vs = svga->curr.vs;
116 if (gs) {
117 key->fs.gs_generic_outputs = gs->base.info.generic_outputs_mask;
118 key->fs.layer_to_zero = !gs->base.info.writes_layer;
119 } else {
120 key->fs.vs_generic_outputs = vs->base.info.generic_outputs_mask;
121 key->fs.layer_to_zero = 1;
122 }
123
124 /* Only need fragment shader fixup for twoside lighting if doing
125 * hwtnl. Otherwise the draw module does the whole job for us.
126 *
127 * SVGA_NEW_SWTNL
128 */
129 if (!svga->state.sw.need_swtnl) {
130 /* SVGA_NEW_RAST, SVGA_NEW_REDUCED_PRIMITIVE
131 */
132 enum mesa_prim prim_mode;
133 struct svga_shader *shader;
134
135 /* Find the last shader in the vertex pipeline and the output primitive mode
136 * from that shader.
137 */
138 if (svga->curr.tes) {
139 shader = &svga->curr.tes->base;
140 prim_mode = shader->info.tes.prim_mode;
141 } else if (svga->curr.gs) {
142 shader = &svga->curr.gs->base;
143 prim_mode = shader->info.gs.out_prim;
144 } else {
145 shader = &svga->curr.vs->base;
146 prim_mode = svga->curr.reduced_prim;
147 }
148
149 key->fs.light_twoside = svga->curr.rast->templ.light_twoside;
150 key->fs.front_ccw = svga->curr.rast->templ.front_ccw;
151 key->fs.pstipple = (svga->curr.rast->templ.poly_stipple_enable &&
152 prim_mode == MESA_PRIM_TRIANGLES);
153
154 if (svga->curr.gs) {
155 key->fs.aa_point = (svga->curr.rast->templ.point_smooth &&
156 shader->info.gs.in_prim == MESA_PRIM_POINTS &&
157 (svga->curr.rast->pointsize > 1.0 ||
158 shader->info.writes_psize));
159
160 if (key->fs.aa_point) {
161 assert(svga->curr.gs->aa_point_coord_index != -1);
162 key->fs.aa_point_coord_index = svga->curr.gs->aa_point_coord_index;
163 }
164 }
165 }
166
167 /* The blend workaround for simulating logicop xor behaviour
168 * requires that the incoming fragment color be white. This change
169 * achieves that by creating a variant of the current fragment
170 * shader that overrides all output colors with 1,1,1,1
171 *
172 * This will work for most shaders, including those containing
173 * TEXKIL and/or depth-write. However, it will break on the
174 * combination of xor-logicop plus alphatest.
175 *
176 * Ultimately, we could implement alphatest in the shader using
177 * texkil prior to overriding the outgoing fragment color.
178 *
179 * SVGA_NEW_BLEND
180 */
181 key->fs.white_fragments = svga->curr.blend->need_white_fragments;
182
183 key->fs.alpha_to_one = svga->curr.blend->alpha_to_one;
184
185 #if MESA_DEBUG
186 /*
187 * We expect a consistent set of samplers and sampler views.
188 * Do some debug checks/warnings here.
189 */
190 {
191 static bool warned = false;
192 unsigned i, n = MAX2(svga->curr.num_sampler_views[shader],
193 svga->curr.num_samplers[shader]);
194 /* Only warn once to prevent too much debug output */
195 if (!warned) {
196 if (svga->curr.num_sampler_views[shader] !=
197 svga->curr.num_samplers[shader]) {
198 debug_printf("svga: mismatched number of sampler views (%u) "
199 "vs. samplers (%u)\n",
200 svga->curr.num_sampler_views[shader],
201 svga->curr.num_samplers[shader]);
202 }
203 for (i = 0; i < n; i++) {
204 if ((svga->curr.sampler_views[shader][i] == NULL) !=
205 (svga->curr.sampler[shader][i] == NULL))
206 debug_printf("sampler_view[%u] = %p but sampler[%u] = %p\n",
207 i, svga->curr.sampler_views[shader][i],
208 i, svga->curr.sampler[shader][i]);
209 }
210 warned = true;
211 }
212 }
213 #endif
214
215 /* XXX: want to limit this to the textures that the shader actually
216 * refers to.
217 *
218 * SVGA_NEW_TEXTURE_BINDING | SVGA_NEW_SAMPLER
219 */
220 svga_init_shader_key_common(svga, shader, &fs->base, key);
221
222 for (i = 0; i < svga->curr.num_samplers[shader]; ++i) {
223 struct pipe_sampler_view *view = svga->curr.sampler_views[shader][i];
224 const struct svga_sampler_state *sampler = svga->curr.sampler[shader][i];
225 if (view) {
226 struct pipe_resource *tex = view->texture;
227 if (tex->target != PIPE_BUFFER) {
228 struct svga_texture *stex = svga_texture(tex);
229 SVGA3dSurfaceFormat format = stex->key.format;
230
231 if (!svga_have_vgpu10(svga) &&
232 (format == SVGA3D_Z_D16 ||
233 format == SVGA3D_Z_D24X8 ||
234 format == SVGA3D_Z_D24S8)) {
235 /* If we're sampling from a SVGA3D_Z_D16, SVGA3D_Z_D24X8,
236 * or SVGA3D_Z_D24S8 surface, we'll automatically get
237 * shadow comparison. But we only get LEQUAL mode.
238 * Set TEX_COMPARE_NONE here so we don't emit the extra FS
239 * code for shadow comparison.
240 */
241 key->tex[i].compare_mode = PIPE_TEX_COMPARE_NONE;
242 key->tex[i].compare_func = PIPE_FUNC_NEVER;
243 /* These depth formats _only_ support comparison mode and
244 * not ordinary sampling so warn if the later is expected.
245 */
246 if (sampler->compare_mode != PIPE_TEX_COMPARE_R_TO_TEXTURE) {
247 debug_warn_once("Unsupported shadow compare mode");
248 }
249 /* The shader translation code can emit code to
250 * handle ALWAYS and NEVER compare functions
251 */
252 else if (sampler->compare_func == PIPE_FUNC_ALWAYS ||
253 sampler->compare_func == PIPE_FUNC_NEVER) {
254 key->tex[i].compare_mode = sampler->compare_mode;
255 key->tex[i].compare_func = sampler->compare_func;
256 }
257 else if (sampler->compare_func != PIPE_FUNC_LEQUAL) {
258 debug_warn_once("Unsupported shadow compare function");
259 }
260 }
261 }
262 }
263 }
264
265 /* sprite coord gen state */
266 key->sprite_coord_enable = svga->curr.rast->templ.sprite_coord_enable;
267
268 key->sprite_origin_lower_left = (svga->curr.rast->templ.sprite_coord_mode
269 == PIPE_SPRITE_COORD_LOWER_LEFT);
270
271 key->fs.flatshade = svga->curr.rast->templ.flatshade;
272
273 /* SVGA_NEW_DEPTH_STENCIL_ALPHA */
274 if (svga_have_vgpu10(svga)) {
275 /* Alpha testing is not supported in integer-valued render targets. */
276 if (svga_has_any_integer_cbufs(svga)) {
277 key->fs.alpha_func = SVGA3D_CMP_ALWAYS;
278 key->fs.alpha_ref = 0;
279 }
280 else {
281 key->fs.alpha_func = svga->curr.depth->alphafunc;
282 key->fs.alpha_ref = svga->curr.depth->alpharef;
283 }
284 }
285
286 /* SVGA_NEW_FRAME_BUFFER | SVGA_NEW_BLEND */
287 if (fs->base.info.fs.color0_writes_all_cbufs ||
288 svga->curr.blend->need_white_fragments) {
289 /* Replicate color0 output (or white) to N colorbuffers */
290 key->fs.write_color0_to_n_cbufs = svga->curr.framebuffer.nr_cbufs;
291 }
292
293 return PIPE_OK;
294 }
295
296
297 /**
298 * svga_reemit_fs_bindings - Reemit the fragment shader bindings
299 */
300 enum pipe_error
svga_reemit_fs_bindings(struct svga_context * svga)301 svga_reemit_fs_bindings(struct svga_context *svga)
302 {
303 enum pipe_error ret;
304
305 assert(svga->rebind.flags.fs);
306 assert(svga_have_gb_objects(svga));
307
308 if (!svga->state.hw_draw.fs)
309 return PIPE_OK;
310
311 if (!svga_need_to_rebind_resources(svga)) {
312 ret = svga->swc->resource_rebind(svga->swc, NULL,
313 svga->state.hw_draw.fs->gb_shader,
314 SVGA_RELOC_READ);
315 }
316 else {
317 if (svga_have_vgpu10(svga))
318 ret = SVGA3D_vgpu10_SetShader(svga->swc, SVGA3D_SHADERTYPE_PS,
319 svga->state.hw_draw.fs->gb_shader,
320 svga->state.hw_draw.fs->id);
321 else
322 ret = SVGA3D_SetGBShader(svga->swc, SVGA3D_SHADERTYPE_PS,
323 svga->state.hw_draw.fs->gb_shader);
324 }
325
326 if (ret != PIPE_OK)
327 return ret;
328
329 svga->rebind.flags.fs = false;
330 return PIPE_OK;
331 }
332
333
334
335 static enum pipe_error
emit_hw_fs(struct svga_context * svga,uint64_t dirty)336 emit_hw_fs(struct svga_context *svga, uint64_t dirty)
337 {
338 struct svga_shader_variant *variant = NULL;
339 enum pipe_error ret = PIPE_OK;
340 struct svga_fragment_shader *fs = svga->curr.fs;
341 struct svga_compile_key key;
342 struct svga_shader *prevShader = NULL; /* shader in the previous stage */
343
344 SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_EMITFS);
345
346 prevShader = svga->curr.gs ?
347 &svga->curr.gs->base : (svga->curr.tes ?
348 &svga->curr.tes->base : &svga->curr.vs->base);
349
350 /* Disable rasterization if rasterizer_discard flag is set or
351 * vs/gs does not output position.
352 */
353 svga->disable_rasterizer =
354 svga->curr.rast->templ.rasterizer_discard ||
355 !prevShader->info.writes_position;
356
357 /* Set FS to NULL when rasterization is to be disabled */
358 if (svga->disable_rasterizer) {
359 /* Set FS to NULL if it has not been done */
360 if (svga->state.hw_draw.fs) {
361 ret = svga_set_shader(svga, SVGA3D_SHADERTYPE_PS, NULL);
362 if (ret != PIPE_OK)
363 goto done;
364 }
365 svga->rebind.flags.fs = false;
366 svga->state.hw_draw.fs = NULL;
367 goto done;
368 }
369
370 /* SVGA_NEW_BLEND
371 * SVGA_NEW_TEXTURE_BINDING
372 * SVGA_NEW_RAST
373 * SVGA_NEW_NEED_SWTNL
374 * SVGA_NEW_SAMPLER
375 * SVGA_NEW_FRAME_BUFFER
376 * SVGA_NEW_DEPTH_STENCIL_ALPHA
377 * SVGA_NEW_VS
378 */
379 ret = make_fs_key(svga, fs, &key);
380 if (ret != PIPE_OK)
381 goto done;
382
383 variant = svga_search_shader_key(&fs->base, &key);
384 if (!variant) {
385 ret = svga_compile_shader(svga, &fs->base, &key, &variant);
386 if (ret != PIPE_OK)
387 goto done;
388 }
389
390 assert(variant);
391
392 if (variant != svga->state.hw_draw.fs) {
393 ret = svga_set_shader(svga, SVGA3D_SHADERTYPE_PS, variant);
394 if (ret != PIPE_OK)
395 goto done;
396
397 svga->rebind.flags.fs = false;
398
399 svga->dirty |= SVGA_NEW_FS_VARIANT;
400 svga->state.hw_draw.fs = variant;
401 }
402
403 done:
404 SVGA_STATS_TIME_POP(svga_sws(svga));
405 return ret;
406 }
407
/* Tracked-state descriptor: re-runs emit_hw_fs whenever any of the
 * listed dirty bits is set (FS/GS/VS changes, texture bindings,
 * rasterizer, blend, etc.).
 */
struct svga_tracked_state svga_hw_fs =
{
   "fragment shader (hwtnl)",
   (SVGA_NEW_FS |
    SVGA_NEW_GS |
    SVGA_NEW_VS |
    SVGA_NEW_TEXTURE_BINDING |
    SVGA_NEW_NEED_SWTNL |
    SVGA_NEW_RAST |
    SVGA_NEW_STIPPLE |
    SVGA_NEW_REDUCED_PRIMITIVE |
    SVGA_NEW_SAMPLER |
    SVGA_NEW_FRAME_BUFFER |
    SVGA_NEW_DEPTH_STENCIL_ALPHA |
    SVGA_NEW_BLEND |
    SVGA_NEW_FS_RAW_BUFFER),
   emit_hw_fs
};
426
427
428
429