/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file crocus_program.c
 *
 * This file contains the driver interface for compiling shaders.
 *
 * See crocus_program_cache.c for the in-memory program cache where the
 * compiled shaders are stored.
 */

#include <stdio.h>
#include <errno.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_atomic.h"
#include "util/u_upload_mgr.h"
#include "util/u_debug.h"
#include "util/u_prim.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_serialize.h"
#include "intel/compiler/elk/elk_compiler.h"
#include "intel/compiler/elk/elk_nir.h"
#include "intel/compiler/elk/elk_prim.h"
#include "intel/compiler/elk/elk_reg.h"
#include "intel/compiler/intel_nir.h"
#include "crocus_context.h"
#include "nir/tgsi_to_nir.h"
#include "program/prog_instruction.h"

#define KEY_INIT_NO_ID() \
   .base.tex.swizzles[0 ... ELK_MAX_SAMPLERS - 1] = 0x688
#define KEY_INIT() \
   .base.program_string_id = ish->program_id, \
   .base.limit_trig_input_range = screen->driconf.limit_trig_input_range, \
   KEY_INIT_NO_ID()

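/**
 * Clear the texture workaround fields from a sampler key.  Callers hand the
 * backend a sanitized copy of the key once these workarounds have already
 * been applied in NIR (see crocus_lower_swizzles), so the backend shouldn't
 * apply them a second time.
 */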
static void
crocus_sanitize_tex_key(struct elk_sampler_prog_key_data *key)
{
   key->gather_channel_quirk_mask = 0;
   for (unsigned s = 0; s < ELK_MAX_SAMPLERS; s++) {
      key->swizzles[s] = SWIZZLE_NOOP;
      key->gfx6_gather_wa[s] = 0;
   }
}

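/**
 * Pack a sampler view's four channel selectors into the 3-bits-per-channel
 * SWIZZLE_* encoding used by the sampler program key.
 */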
static uint32_t
crocus_get_texture_swizzle(const struct crocus_context *ice,
                           const struct crocus_sampler_view *t)
{
   uint32_t swiz = 0;

   for (int i = 0; i < 4; i++) {
      swiz |= t->swizzle[i] << (i * 3);
   }
   return swiz;
}

static inline bool can_push_ubo(const struct intel_device_info *devinfo)
{
   /* push works for everyone except SNB at the moment */
   return devinfo->ver != 6;
}

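/**
 * Return the gather4 workaround flags (if any) needed for a format on Gfx6,
 * where 8- and 16-bit integer gathers need fixup code in the shader (see
 * the ELK_WA_* handling in crocus_setup_binding_table below).
 */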
static uint8_t
gfx6_gather_workaround(enum pipe_format pformat)
{
   switch (pformat) {
   case PIPE_FORMAT_R8_SINT: return ELK_WA_SIGN | ELK_WA_8BIT;
   case PIPE_FORMAT_R8_UINT: return ELK_WA_8BIT;
   case PIPE_FORMAT_R16_SINT: return ELK_WA_SIGN | ELK_WA_16BIT;
   case PIPE_FORMAT_R16_UINT: return ELK_WA_16BIT;
   default:
      /* Note that even though PIPE_FORMAT_R32_SINT and
       * PIPE_FORMAT_R32_UINT have format overrides in
       * the surface state, there is no shader w/a required.
       */
      return 0;
   }
}

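/* Swizzles for reading a transform feedback output starting at a given
 * component, replicating the last component into the remaining slots.
 */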
static const unsigned crocus_gfx6_swizzle_for_offset[4] = {
   ELK_SWIZZLE4(0, 1, 2, 3),
   ELK_SWIZZLE4(1, 2, 3, 3),
   ELK_SWIZZLE4(2, 3, 3, 3),
   ELK_SWIZZLE4(3, 3, 3, 3)
};

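/**
 * Translate Gallium stream output info into the Gfx6 GS program data
 * bindings and swizzles used to emit transform feedback code.
 */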
static void
gfx6_gs_xfb_setup(const struct pipe_stream_output_info *so_info,
                  struct elk_gs_prog_data *gs_prog_data)
{
   /* Make sure that the VUE slots won't overflow the unsigned chars in
    * prog_data->transform_feedback_bindings[].
    */
   STATIC_ASSERT(ELK_VARYING_SLOT_COUNT <= 256);

   /* Make sure that we don't need more binding table entries than we've
    * set aside for use in transform feedback.  (We shouldn't, since we
    * set aside enough binding table entries to have one per component).
    */
   assert(so_info->num_outputs <= ELK_MAX_SOL_BINDINGS);

   gs_prog_data->num_transform_feedback_bindings = so_info->num_outputs;
   for (unsigned i = 0; i < so_info->num_outputs; i++) {
      gs_prog_data->transform_feedback_bindings[i] =
         so_info->output[i].register_index;
      gs_prog_data->transform_feedback_swizzles[i] =
         crocus_gfx6_swizzle_for_offset[so_info->output[i].start_component];
   }
}

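/**
 * Same as gfx6_gs_xfb_setup(), but fills the fixed-function GS program key
 * rather than compiled GS program data.
 */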
static void
gfx6_ff_gs_xfb_setup(const struct pipe_stream_output_info *so_info,
                     struct elk_ff_gs_prog_key *key)
{
   key->num_transform_feedback_bindings = so_info->num_outputs;
   for (unsigned i = 0; i < so_info->num_outputs; i++) {
      key->transform_feedback_bindings[i] =
         so_info->output[i].register_index;
      key->transform_feedback_swizzles[i] =
         crocus_gfx6_swizzle_for_offset[so_info->output[i].start_component];
   }
}

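/**
 * Fill out the texture/sampler portion of a shader key, noting any format
 * or swizzle workarounds the currently bound sampler views require.
 */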
static void
crocus_populate_sampler_prog_key_data(struct crocus_context *ice,
                                      const struct intel_device_info *devinfo,
                                      gl_shader_stage stage,
                                      struct crocus_uncompiled_shader *ish,
                                      bool uses_texture_gather,
                                      struct elk_sampler_prog_key_data *key)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   uint32_t mask = ish->nir->info.textures_used[0];

   while (mask) {
      const int s = u_bit_scan(&mask);

      struct crocus_sampler_view *texture = ice->state.shaders[stage].textures[s];
      key->swizzles[s] = SWIZZLE_NOOP;

      if (!texture)
         continue;
      if (texture->base.target == PIPE_BUFFER)
         continue;
      if (devinfo->verx10 < 75) {
         key->swizzles[s] = crocus_get_texture_swizzle(ice, texture);
      }

      screen->vtbl.fill_clamp_mask(ice->state.shaders[stage].samplers[s], s, key->gl_clamp_mask);

      /* gather4 for RG32* is broken in multiple ways on Gen7. */
      if (devinfo->ver == 7 && uses_texture_gather) {
         switch (texture->base.format) {
         case PIPE_FORMAT_R32G32_UINT:
         case PIPE_FORMAT_R32G32_SINT: {
            /* We have to override the format to R32G32_FLOAT_LD.
             * This means that SCS_ALPHA and SCS_ONE will return 0x3f800000
             * (1.0) rather than integer 1.  This needs shader hacks.
             *
             * On Ivybridge, we whack W (alpha) to ONE in our key's
             * swizzle.  On Haswell, we look at the original texture
             * swizzle, and use XYZW with channels overridden to ONE,
             * leaving normal texture swizzling to SCS.
             */
            unsigned src_swizzle = key->swizzles[s];
            for (int i = 0; i < 4; i++) {
               unsigned src_comp = GET_SWZ(src_swizzle, i);
               if (src_comp == SWIZZLE_ONE || src_comp == SWIZZLE_W) {
                  key->swizzles[s] &= ~(0x7 << (3 * i));
                  key->swizzles[s] |= SWIZZLE_ONE << (3 * i);
               }
            }
         }
         FALLTHROUGH;
         case PIPE_FORMAT_R32G32_FLOAT:
            /* The channel select for green doesn't work - we have to
             * request blue.  Haswell can use SCS for this, but Ivybridge
             * needs a shader workaround.
             */
            if (devinfo->verx10 < 75)
               key->gather_channel_quirk_mask |= 1 << s;
            break;
         default:
            break;
         }
      }
      if (devinfo->ver == 6 && uses_texture_gather) {
         key->gfx6_gather_wa[s] = gfx6_gather_workaround(texture->base.format);
      }
   }
}

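/**
 * Apply the key's texture swizzles in NIR via nir_lower_tex().  These are
 * only populated for hardware that can't swizzle in the surface state
 * (the devinfo->verx10 < 75 check above).
 */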
static void
crocus_lower_swizzles(struct nir_shader *nir,
                      const struct elk_sampler_prog_key_data *key_tex)
{
   struct nir_lower_tex_options tex_options = {
      .lower_invalid_implicit_lod = true,
   };
   uint32_t mask = nir->info.textures_used[0];

   while (mask) {
      const int s = u_bit_scan(&mask);

      if (key_tex->swizzles[s] == SWIZZLE_NOOP)
         continue;

      tex_options.swizzle_result |= (1 << s);
      for (unsigned c = 0; c < 4; c++)
         tex_options.swizzles[s][c] = GET_SWZ(key_tex->swizzles[s], c);
   }
   if (tex_options.swizzle_result)
      nir_lower_tex(nir, &tex_options);
}

static unsigned
get_new_program_id(struct crocus_screen *screen)
{
   return p_atomic_inc_return(&screen->program_id);
}

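/**
 * Compute the flattened offset of an array-of-arrays deref chain, in units
 * of elem_size, clamped to the array bounds (see the comment at the end).
 */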
static nir_def *
get_aoa_deref_offset(nir_builder *b,
                     nir_deref_instr *deref,
                     unsigned elem_size)
{
   unsigned array_size = elem_size;
   nir_def *offset = nir_imm_int(b, 0);

   while (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      /* This level's element size is the previous level's array size */
      nir_def *index = deref->arr.index.ssa;
      assert(deref->arr.index.ssa);
      offset = nir_iadd(b, offset,
                        nir_imul_imm(b, index, array_size));

      deref = nir_deref_instr_parent(deref);
      assert(glsl_type_is_array(deref->type));
      array_size *= glsl_get_length(deref->type);
   }

   /* Accessing an invalid surface index with the dataport can result in a
    * hang.  According to the spec "if the index used to select an individual
    * element is negative or greater than or equal to the size of the array,
    * the results of the operation are undefined but may not lead to
    * termination" -- which is one of the possible outcomes of the hang.
    * Clamp the index to prevent access outside of the array bounds.
    */
   return nir_umin(b, offset, nir_imm_int(b, array_size - elem_size));
}

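/**
 * Rewrite image deref intrinsics to use flat indices, built from the
 * variable's driver_location plus the (clamped) array offset.
 */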
static void
crocus_lower_storage_image_derefs(nir_shader *nir)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(nir);

   nir_builder b = nir_builder_create(impl);

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic:
         case nir_intrinsic_image_deref_atomic_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel: {
            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            nir_variable *var = nir_deref_instr_get_variable(deref);

            b.cursor = nir_before_instr(&intrin->instr);
            nir_def *index =
               nir_iadd_imm(&b, get_aoa_deref_offset(&b, deref, 1),
                            var->data.driver_location);
            nir_rewrite_image_intrinsic(intrin, index, false);
            break;
         }

         default:
            break;
         }
      }
   }
}

// XXX: need unify_interfaces() at link time...

/**
 * Undo nir_lower_passthrough_edgeflags but keep the inputs_read flag.
 */
static bool
crocus_fix_edge_flags(nir_shader *nir)
{
   if (nir->info.stage != MESA_SHADER_VERTEX) {
      nir_shader_preserve_all_metadata(nir);
      return false;
   }

   nir_variable *var = nir_find_variable_with_location(nir, nir_var_shader_out,
                                                       VARYING_SLOT_EDGE);
   if (!var) {
      nir_shader_preserve_all_metadata(nir);
      return false;
   }

   var->data.mode = nir_var_shader_temp;
   nir->info.outputs_written &= ~VARYING_BIT_EDGE;
   nir->info.inputs_read &= ~VERT_BIT_EDGEFLAG;
   nir_fixup_deref_modes(nir);

   nir_foreach_function_impl(impl, nir) {
      nir_metadata_preserve(impl, nir_metadata_control_flow |
                                  nir_metadata_live_defs |
                                  nir_metadata_loop_analysis);
   }

   return true;
}

/**
 * Fix an uncompiled shader's stream output info.
 *
 * Core Gallium stores output->register_index as a "slot" number, where
 * slots are assigned consecutively to all outputs in info->outputs_written.
 * This naive packing of outputs doesn't work for us - we too have slots,
 * but the layout is defined by the VUE map, which we won't have until we
 * compile a specific shader variant.  So, we remap these and simply store
 * VARYING_SLOT_* in our copy's output->register_index fields.
 *
 * We also fix up VARYING_SLOT_{LAYER,VIEWPORT,PSIZ} to select the Y/Z/W
 * components of our VUE header.  See elk_vue_map.c for the layout.
 */
static void
update_so_info(struct pipe_stream_output_info *so_info,
               uint64_t outputs_written)
{
   uint8_t reverse_map[64] = {};
   unsigned slot = 0;
   while (outputs_written) {
      reverse_map[slot++] = u_bit_scan64(&outputs_written);
   }

   for (unsigned i = 0; i < so_info->num_outputs; i++) {
      struct pipe_stream_output *output = &so_info->output[i];

      /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
      output->register_index = reverse_map[output->register_index];

      /* The VUE header contains three scalar fields packed together:
       * - gl_PointSize is stored in VARYING_SLOT_PSIZ.w
       * - gl_Layer is stored in VARYING_SLOT_PSIZ.y
       * - gl_ViewportIndex is stored in VARYING_SLOT_PSIZ.z
       */
      switch (output->register_index) {
      case VARYING_SLOT_LAYER:
         assert(output->num_components == 1);
         output->register_index = VARYING_SLOT_PSIZ;
         output->start_component = 1;
         break;
      case VARYING_SLOT_VIEWPORT:
         assert(output->num_components == 1);
         output->register_index = VARYING_SLOT_PSIZ;
         output->start_component = 2;
         break;
      case VARYING_SLOT_PSIZ:
         assert(output->num_components == 1);
         output->start_component = 3;
         break;
      }

      //info->outputs_written |= 1ull << output->register_index;
   }
}

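/**
 * Fill one vec4's worth of image-param system values: n dwords taken from
 * the isl_image_param field at the given byte offset, zero-padded out to
 * four components.
 */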
static void
setup_vec4_image_sysval(uint32_t *sysvals, uint32_t idx,
                        unsigned offset, unsigned n)
{
   assert(offset % sizeof(uint32_t) == 0);

   for (unsigned i = 0; i < n; ++i)
      sysvals[i] = ELK_PARAM_IMAGE(idx, offset / sizeof(uint32_t) + i);

   for (unsigned i = n; i < 4; ++i)
      sysvals[i] = ELK_PARAM_BUILTIN_ZERO;
}

/**
 * Associate NIR uniform variables with the prog_data->param[] mechanism
 * used by the backend.  Also, decide which UBOs we'd like to push in an
 * ideal situation (though the backend can reduce this).
 */
static void
crocus_setup_uniforms(ASSERTED const struct intel_device_info *devinfo,
                      void *mem_ctx,
                      nir_shader *nir,
                      struct elk_stage_prog_data *prog_data,
                      enum elk_param_builtin **out_system_values,
                      unsigned *out_num_system_values,
                      unsigned *out_num_cbufs)
{
   const unsigned CROCUS_MAX_SYSTEM_VALUES =
      PIPE_MAX_SHADER_IMAGES * ISL_IMAGE_PARAM_SIZE;
   enum elk_param_builtin *system_values =
      rzalloc_array(mem_ctx, enum elk_param_builtin, CROCUS_MAX_SYSTEM_VALUES);
   unsigned num_system_values = 0;

   unsigned patch_vert_idx = -1;
   unsigned tess_outer_default_idx = -1;
   unsigned tess_inner_default_idx = -1;
   unsigned ucp_idx[CROCUS_MAX_CLIP_PLANES];
   unsigned img_idx[PIPE_MAX_SHADER_IMAGES];
   unsigned variable_group_size_idx = -1;
   memset(ucp_idx, -1, sizeof(ucp_idx));
   memset(img_idx, -1, sizeof(img_idx));

   nir_function_impl *impl = nir_shader_get_entrypoint(nir);

   nir_builder b = nir_builder_at(nir_before_impl(impl));

   nir_def *temp_ubo_name = nir_undef(&b, 1, 32);
   nir_def *temp_const_ubo_name = NULL;

   /* Turn system value intrinsics into uniforms */
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         nir_def *offset;

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_base_workgroup_id: {
            /* GL doesn't have a concept of base workgroup */
            b.cursor = nir_instr_remove(&intrin->instr);
            nir_def_rewrite_uses(&intrin->def,
                                 nir_imm_zero(&b, 3, 32));
            continue;
         }
         case nir_intrinsic_load_constant: {
            /* This one is special because it reads from the shader constant
             * data and not cbuf0 which gallium uploads for us.
             */
            b.cursor = nir_before_instr(instr);
            nir_def *offset =
               nir_iadd_imm(&b, intrin->src[0].ssa,
                            nir_intrinsic_base(intrin));

            if (temp_const_ubo_name == NULL)
               temp_const_ubo_name = nir_imm_int(&b, 0);

            nir_intrinsic_instr *load_ubo =
               nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ubo);
            load_ubo->num_components = intrin->num_components;
            load_ubo->src[0] = nir_src_for_ssa(temp_const_ubo_name);
            load_ubo->src[1] = nir_src_for_ssa(offset);
            nir_intrinsic_set_align(load_ubo, 4, 0);
            nir_intrinsic_set_range_base(load_ubo, 0);
            nir_intrinsic_set_range(load_ubo, ~0);
            nir_def_init(&load_ubo->instr, &load_ubo->def,
                         intrin->def.num_components,
                         intrin->def.bit_size);
            nir_builder_instr_insert(&b, &load_ubo->instr);

            nir_def_replace(&intrin->def, &load_ubo->def);
            continue;
         }
         case nir_intrinsic_load_user_clip_plane: {
            unsigned ucp = nir_intrinsic_ucp_id(intrin);

            if (ucp_idx[ucp] == -1) {
               ucp_idx[ucp] = num_system_values;
               num_system_values += 4;
            }

            for (int i = 0; i < 4; i++) {
               system_values[ucp_idx[ucp] + i] =
                  ELK_PARAM_BUILTIN_CLIP_PLANE(ucp, i);
            }

            b.cursor = nir_before_instr(instr);
            offset = nir_imm_int(&b, ucp_idx[ucp] * sizeof(uint32_t));
            break;
         }
         case nir_intrinsic_load_patch_vertices_in:
            if (patch_vert_idx == -1)
               patch_vert_idx = num_system_values++;

            system_values[patch_vert_idx] =
               ELK_PARAM_BUILTIN_PATCH_VERTICES_IN;

            b.cursor = nir_before_instr(instr);
            offset = nir_imm_int(&b, patch_vert_idx * sizeof(uint32_t));
            break;
         case nir_intrinsic_load_tess_level_outer_default:
            if (tess_outer_default_idx == -1) {
               tess_outer_default_idx = num_system_values;
               num_system_values += 4;
            }

            for (int i = 0; i < 4; i++) {
               system_values[tess_outer_default_idx + i] =
                  ELK_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
            }

            b.cursor = nir_before_instr(instr);
            offset =
               nir_imm_int(&b, tess_outer_default_idx * sizeof(uint32_t));
            break;
         case nir_intrinsic_load_tess_level_inner_default:
            if (tess_inner_default_idx == -1) {
               tess_inner_default_idx = num_system_values;
               num_system_values += 2;
            }

            for (int i = 0; i < 2; i++) {
               system_values[tess_inner_default_idx + i] =
                  ELK_PARAM_BUILTIN_TESS_LEVEL_INNER_X + i;
            }

            b.cursor = nir_before_instr(instr);
            offset =
               nir_imm_int(&b, tess_inner_default_idx * sizeof(uint32_t));
            break;
         case nir_intrinsic_image_deref_load_param_intel: {
            assert(devinfo->ver < 9);
            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            nir_variable *var = nir_deref_instr_get_variable(deref);

            if (img_idx[var->data.binding] == -1) {
               /* GL only allows arrays of arrays of images. */
               assert(glsl_type_is_image(glsl_without_array(var->type)));
               unsigned num_images = MAX2(1, glsl_get_aoa_size(var->type));

               for (int i = 0; i < num_images; i++) {
                  const unsigned img = var->data.binding + i;

                  img_idx[img] = num_system_values;
                  num_system_values += ISL_IMAGE_PARAM_SIZE;

                  uint32_t *img_sv = &system_values[img_idx[img]];

                  setup_vec4_image_sysval(
                     img_sv + ISL_IMAGE_PARAM_OFFSET_OFFSET, img,
                     offsetof(struct isl_image_param, offset), 2);
                  setup_vec4_image_sysval(
                     img_sv + ISL_IMAGE_PARAM_SIZE_OFFSET, img,
                     offsetof(struct isl_image_param, size), 3);
                  setup_vec4_image_sysval(
                     img_sv + ISL_IMAGE_PARAM_STRIDE_OFFSET, img,
                     offsetof(struct isl_image_param, stride), 4);
                  setup_vec4_image_sysval(
                     img_sv + ISL_IMAGE_PARAM_TILING_OFFSET, img,
                     offsetof(struct isl_image_param, tiling), 3);
                  setup_vec4_image_sysval(
                     img_sv + ISL_IMAGE_PARAM_SWIZZLING_OFFSET, img,
                     offsetof(struct isl_image_param, swizzling), 2);
               }
            }

            b.cursor = nir_before_instr(instr);
            offset = nir_iadd_imm(&b,
               get_aoa_deref_offset(&b, deref, ISL_IMAGE_PARAM_SIZE * 4),
               img_idx[var->data.binding] * 4 +
               nir_intrinsic_base(intrin) * 16);
            break;
         }
         case nir_intrinsic_load_workgroup_size: {
            assert(nir->info.workgroup_size_variable);
            if (variable_group_size_idx == -1) {
               variable_group_size_idx = num_system_values;
               num_system_values += 3;
               for (int i = 0; i < 3; i++) {
                  system_values[variable_group_size_idx + i] =
                     ELK_PARAM_BUILTIN_WORK_GROUP_SIZE_X + i;
               }
            }

            b.cursor = nir_before_instr(instr);
            offset = nir_imm_int(&b,
               variable_group_size_idx * sizeof(uint32_t));
            break;
         }
         default:
            continue;
         }

         unsigned comps = nir_intrinsic_dest_components(intrin);

         nir_intrinsic_instr *load =
            nir_intrinsic_instr_create(nir, nir_intrinsic_load_ubo);
         load->num_components = comps;
         load->src[0] = nir_src_for_ssa(temp_ubo_name);
         load->src[1] = nir_src_for_ssa(offset);
         nir_intrinsic_set_align(load, 4, 0);
         nir_intrinsic_set_range_base(load, 0);
         nir_intrinsic_set_range(load, ~0);
         nir_def_init(&load->instr, &load->def, comps, 32);
         nir_builder_instr_insert(&b, &load->instr);
         nir_def_rewrite_uses(&intrin->def,
                              &load->def);
         nir_instr_remove(instr);
      }
   }

   nir_validate_shader(nir, "before remapping");

   /* Uniforms are stored in constant buffer 0, so the user-facing UBOs are
    * indexed starting at one.  If any constant buffer is needed, constant
    * buffer 0 will be needed, so account for it.
    */
   unsigned num_cbufs = nir->info.num_ubos;
   if (num_cbufs || nir->num_uniforms)
      num_cbufs++;

   /* Place the new params in a new cbuf. */
   if (num_system_values > 0) {
      unsigned sysval_cbuf_index = num_cbufs;
      num_cbufs++;

      system_values = reralloc(mem_ctx, system_values, enum elk_param_builtin,
                               num_system_values);

      nir_foreach_block(block, impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);

            if (load->intrinsic != nir_intrinsic_load_ubo)
               continue;

            b.cursor = nir_before_instr(instr);

            if (load->src[0].ssa == temp_ubo_name) {
               nir_def *imm = nir_imm_int(&b, sysval_cbuf_index);
               nir_src_rewrite(&load->src[0], imm);
            }
         }
      }

      /* We need to fold the new iadds for elk_nir_analyze_ubo_ranges */
      nir_opt_constant_folding(nir);
   } else {
      ralloc_free(system_values);
      system_values = NULL;
   }

   assert(num_cbufs < PIPE_MAX_CONSTANT_BUFFERS);
   nir_validate_shader(nir, "after remap");

   /* We don't use params[] but gallium leaves num_uniforms set.  We use this
    * to detect when cbuf0 exists but we don't need it anymore when we get
    * here.  Instead, zero it out so that the back-end doesn't get confused
    * when nr_params * 4 != num_uniforms.
    */
   nir->num_uniforms = 0;

   /* Constant loads (if any) need to go at the end of the constant buffers so
    * we need to know num_cbufs before we can lower to them.
    */
   if (temp_const_ubo_name != NULL) {
      nir_load_const_instr *const_ubo_index =
         nir_instr_as_load_const(temp_const_ubo_name->parent_instr);
      assert(const_ubo_index->def.bit_size == 32);
      const_ubo_index->value[0].u32 = num_cbufs;
   }

   *out_system_values = system_values;
   *out_num_system_values = num_system_values;
   *out_num_cbufs = num_cbufs;
}

static const char *surface_group_names[] = {
   [CROCUS_SURFACE_GROUP_RENDER_TARGET] = "render target",
   [CROCUS_SURFACE_GROUP_RENDER_TARGET_READ] = "non-coherent render target read",
   [CROCUS_SURFACE_GROUP_SOL] = "streamout",
   [CROCUS_SURFACE_GROUP_CS_WORK_GROUPS] = "CS work groups",
   [CROCUS_SURFACE_GROUP_TEXTURE] = "texture",
   [CROCUS_SURFACE_GROUP_TEXTURE_GATHER] = "texture gather",
   [CROCUS_SURFACE_GROUP_UBO] = "ubo",
   [CROCUS_SURFACE_GROUP_SSBO] = "ssbo",
   [CROCUS_SURFACE_GROUP_IMAGE] = "image",
};

static void
crocus_print_binding_table(FILE *fp, const char *name,
                           const struct crocus_binding_table *bt)
{
   STATIC_ASSERT(ARRAY_SIZE(surface_group_names) == CROCUS_SURFACE_GROUP_COUNT);

   uint32_t total = 0;
   uint32_t compacted = 0;

   for (int i = 0; i < CROCUS_SURFACE_GROUP_COUNT; i++) {
      uint32_t size = bt->sizes[i];
      total += size;
      if (size)
         compacted += util_bitcount64(bt->used_mask[i]);
   }

   if (total == 0) {
      fprintf(fp, "Binding table for %s is empty\n\n", name);
      return;
   }

   if (total != compacted) {
      fprintf(fp, "Binding table for %s "
              "(compacted to %u entries from %u entries)\n",
              name, compacted, total);
   } else {
      fprintf(fp, "Binding table for %s (%u entries)\n", name, total);
   }

   uint32_t entry = 0;
   for (int i = 0; i < CROCUS_SURFACE_GROUP_COUNT; i++) {
      uint64_t mask = bt->used_mask[i];
      while (mask) {
         int index = u_bit_scan64(&mask);
         fprintf(fp, "  [%u] %s #%d\n", entry++, surface_group_names[i], index);
      }
   }
   fprintf(fp, "\n");
}

enum {
   /* Max elements in a surface group. */
   SURFACE_GROUP_MAX_ELEMENTS = 64,
};

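/**
 * Rewrite a surface index source from its group-relative value to the
 * final binding table index.
 */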
static void
rewrite_src_with_bti(nir_builder *b, struct crocus_binding_table *bt,
                     nir_instr *instr, nir_src *src,
                     enum crocus_surface_group group)
{
   assert(bt->sizes[group] > 0);

   b->cursor = nir_before_instr(instr);
   nir_def *bti;
   if (nir_src_is_const(*src)) {
      uint32_t index = nir_src_as_uint(*src);
      bti = nir_imm_intN_t(b, crocus_group_index_to_bti(bt, group, index),
                           src->ssa->bit_size);
   } else {
      /* Indirect usage makes all the surfaces of the group available,
       * so we can just add the base.
       */
      assert(bt->used_mask[group] == BITFIELD64_MASK(bt->sizes[group]));
      bti = nir_iadd_imm(b, src->ssa, bt->offsets[group]);
   }
   nir_src_rewrite(src, bti);
}

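/**
 * Mark which surfaces in a group are used.  A constant index marks a single
 * surface; indirect indexing conservatively marks the whole group.
 */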
static void
mark_used_with_src(struct crocus_binding_table *bt, nir_src *src,
                   enum crocus_surface_group group)
{
   assert(bt->sizes[group] > 0);

   if (nir_src_is_const(*src)) {
      uint64_t index = nir_src_as_uint(*src);
      assert(index < bt->sizes[group]);
      bt->used_mask[group] |= 1ull << index;
   } else {
      /* There's an indirect usage, we need all the surfaces. */
      bt->used_mask[group] = BITFIELD64_MASK(bt->sizes[group]);
   }
}

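/**
 * Debug escape hatch: setting INTEL_DISABLE_COMPACT_BINDING_TABLE in the
 * environment keeps every binding table slot, used or not.
 */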
static bool
skip_compacting_binding_tables(void)
{
   static int skip = -1;
   if (skip < 0)
      skip = debug_get_bool_option("INTEL_DISABLE_COMPACT_BINDING_TABLE", false);
   return skip;
}

/**
 * Set up the binding table indices and apply to the shader.
 */
static void
crocus_setup_binding_table(const struct intel_device_info *devinfo,
                           struct nir_shader *nir,
                           struct crocus_binding_table *bt,
                           unsigned num_render_targets,
                           unsigned num_system_values,
                           unsigned num_cbufs,
                           const struct elk_sampler_prog_key_data *key)
{
   const struct shader_info *info = &nir->info;

   memset(bt, 0, sizeof(*bt));

   /* Set the sizes for each surface group.  For some groups, we already know
    * upfront how many will be used, so mark them.
    */
   if (info->stage == MESA_SHADER_FRAGMENT) {
      bt->sizes[CROCUS_SURFACE_GROUP_RENDER_TARGET] = num_render_targets;
      /* All render targets used. */
      bt->used_mask[CROCUS_SURFACE_GROUP_RENDER_TARGET] =
         BITFIELD64_MASK(num_render_targets);

      /* Setup render target read surface group in order to support non-coherent
       * framebuffer fetch on Gfx7
       */
      if (devinfo->ver >= 6 && info->outputs_read) {
         bt->sizes[CROCUS_SURFACE_GROUP_RENDER_TARGET_READ] = num_render_targets;
         bt->used_mask[CROCUS_SURFACE_GROUP_RENDER_TARGET_READ] =
            BITFIELD64_MASK(num_render_targets);
      }
   } else if (info->stage == MESA_SHADER_COMPUTE) {
      bt->sizes[CROCUS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
   } else if (info->stage == MESA_SHADER_GEOMETRY) {
      /* In gfx6 we reserve the first ELK_MAX_SOL_BINDINGS entries for transform
       * feedback surfaces.
       */
      if (devinfo->ver == 6) {
         bt->sizes[CROCUS_SURFACE_GROUP_SOL] = ELK_MAX_SOL_BINDINGS;
         bt->used_mask[CROCUS_SURFACE_GROUP_SOL] = (uint64_t)-1;
      }
   }

   bt->sizes[CROCUS_SURFACE_GROUP_TEXTURE] = BITSET_LAST_BIT(info->textures_used);
   bt->used_mask[CROCUS_SURFACE_GROUP_TEXTURE] = info->textures_used[0];

   if (info->uses_texture_gather && devinfo->ver < 8) {
      bt->sizes[CROCUS_SURFACE_GROUP_TEXTURE_GATHER] = BITSET_LAST_BIT(info->textures_used);
      bt->used_mask[CROCUS_SURFACE_GROUP_TEXTURE_GATHER] = info->textures_used[0];
   }

   bt->sizes[CROCUS_SURFACE_GROUP_IMAGE] = info->num_images;

   /* Allocate an extra slot in the UBO section for NIR constants.
    * Binding table compaction will remove it if unnecessary.
    *
    * We don't include them in crocus_compiled_shader::num_cbufs because
    * they are uploaded separately from shs->constbufs[], but from a shader
    * point of view, they're another UBO (at the end of the section).
    */
   bt->sizes[CROCUS_SURFACE_GROUP_UBO] = num_cbufs + 1;

   bt->sizes[CROCUS_SURFACE_GROUP_SSBO] = info->num_ssbos;

   for (int i = 0; i < CROCUS_SURFACE_GROUP_COUNT; i++)
      assert(bt->sizes[i] <= SURFACE_GROUP_MAX_ELEMENTS);

   /* Mark surfaces used for the cases we don't have the information available
    * upfront.
    */
   nir_function_impl *impl = nir_shader_get_entrypoint(nir);
   nir_foreach_block (block, impl) {
      nir_foreach_instr (instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_load_num_workgroups:
            bt->used_mask[CROCUS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
            break;

         case nir_intrinsic_load_output:
            if (devinfo->ver >= 6) {
               mark_used_with_src(bt, &intrin->src[0],
                                  CROCUS_SURFACE_GROUP_RENDER_TARGET_READ);
            }
            break;

         case nir_intrinsic_image_size:
         case nir_intrinsic_image_load:
         case nir_intrinsic_image_store:
         case nir_intrinsic_image_atomic:
         case nir_intrinsic_image_atomic_swap:
         case nir_intrinsic_image_load_raw_intel:
         case nir_intrinsic_image_store_raw_intel:
            mark_used_with_src(bt, &intrin->src[0], CROCUS_SURFACE_GROUP_IMAGE);
            break;

         case nir_intrinsic_load_ubo:
            mark_used_with_src(bt, &intrin->src[0], CROCUS_SURFACE_GROUP_UBO);
            break;

         case nir_intrinsic_store_ssbo:
            mark_used_with_src(bt, &intrin->src[1], CROCUS_SURFACE_GROUP_SSBO);
            break;

         case nir_intrinsic_get_ssbo_size:
         case nir_intrinsic_ssbo_atomic:
         case nir_intrinsic_ssbo_atomic_swap:
         case nir_intrinsic_load_ssbo:
            mark_used_with_src(bt, &intrin->src[0], CROCUS_SURFACE_GROUP_SSBO);
            break;

         default:
            break;
         }
      }
   }

   /* When disabled, we just mark everything as used. */
   if (unlikely(skip_compacting_binding_tables())) {
      for (int i = 0; i < CROCUS_SURFACE_GROUP_COUNT; i++)
         bt->used_mask[i] = BITFIELD64_MASK(bt->sizes[i]);
   }

   /* Calculate the offsets and the binding table size based on the used
    * surfaces.  After this point, the functions to go between "group indices"
    * and binding table indices can be used.
    */
   uint32_t next = 0;
   for (int i = 0; i < CROCUS_SURFACE_GROUP_COUNT; i++) {
      if (bt->used_mask[i] != 0) {
         bt->offsets[i] = next;
         next += util_bitcount64(bt->used_mask[i]);
      }
   }
   bt->size_bytes = next * 4;

   if (INTEL_DEBUG(DEBUG_BT)) {
      crocus_print_binding_table(stderr, gl_shader_stage_name(info->stage), bt);
   }

   /* Apply the binding table indices.  The backend compiler is not expected
    * to change those, as we haven't set any of the *_start entries in elk
    * binding_table.
    */
   nir_builder b = nir_builder_create(impl);

   nir_foreach_block (block, impl) {
      nir_foreach_instr (instr, block) {
         if (instr->type == nir_instr_type_tex) {
            nir_tex_instr *tex = nir_instr_as_tex(instr);
            bool is_gather = devinfo->ver < 8 && tex->op == nir_texop_tg4;

            /* Rewrite the tg4 component from green to blue before replacing
             * the texture index.
             */
            if (devinfo->verx10 == 70) {
               if (tex->component == 1)
                  if (key->gather_channel_quirk_mask & (1 << tex->texture_index))
                     tex->component = 2;
            }

            if (is_gather && devinfo->ver == 6 && key->gfx6_gather_wa[tex->texture_index]) {
               b.cursor = nir_after_instr(instr);
               enum elk_gfx6_gather_sampler_wa wa = key->gfx6_gather_wa[tex->texture_index];
               int width = (wa & ELK_WA_8BIT) ? 8 : 16;

               nir_def *val = nir_fmul_imm(&b, &tex->def, (1 << width) - 1);
               val = nir_f2u32(&b, val);
               if (wa & ELK_WA_SIGN) {
                  val = nir_ishl_imm(&b, val, 32 - width);
                  val = nir_ishr_imm(&b, val, 32 - width);
               }
               nir_def_rewrite_uses_after(&tex->def, val, val->parent_instr);
            }

            tex->texture_index =
               crocus_group_index_to_bti(bt, is_gather ? CROCUS_SURFACE_GROUP_TEXTURE_GATHER : CROCUS_SURFACE_GROUP_TEXTURE,
                                         tex->texture_index);
            continue;
         }

         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_image_size:
         case nir_intrinsic_image_load:
         case nir_intrinsic_image_store:
         case nir_intrinsic_image_atomic:
         case nir_intrinsic_image_atomic_swap:
         case nir_intrinsic_image_load_raw_intel:
         case nir_intrinsic_image_store_raw_intel:
            rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
                                 CROCUS_SURFACE_GROUP_IMAGE);
            break;

         case nir_intrinsic_load_ubo:
            rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
                                 CROCUS_SURFACE_GROUP_UBO);
            break;

         case nir_intrinsic_store_ssbo:
            rewrite_src_with_bti(&b, bt, instr, &intrin->src[1],
                                 CROCUS_SURFACE_GROUP_SSBO);
            break;

         case nir_intrinsic_load_output:
            if (devinfo->ver >= 6) {
               rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
                                    CROCUS_SURFACE_GROUP_RENDER_TARGET_READ);
            }
            break;

         case nir_intrinsic_get_ssbo_size:
         case nir_intrinsic_ssbo_atomic:
         case nir_intrinsic_ssbo_atomic_swap:
         case nir_intrinsic_load_ssbo:
            rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
                                 CROCUS_SURFACE_GROUP_SSBO);
            break;

         default:
            break;
         }
      }
   }
}

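/**
 * Log a performance warning when a shader is recompiled, along with which
 * key fields changed since the previous compile.
 */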
static void
crocus_debug_recompile(struct crocus_context *ice,
                       struct shader_info *info,
                       const struct elk_base_prog_key *key)
{
   struct crocus_screen *screen = (struct crocus_screen *) ice->ctx.screen;
   const struct elk_compiler *c = screen->compiler;

   if (!info)
      return;

   elk_shader_perf_log(c, &ice->dbg, "Recompiling %s shader for program %s: %s\n",
                       _mesa_shader_stage_to_string(info->stage),
                       info->name ? info->name : "(no identifier)",
                       info->label ? info->label : "");

   const void *old_key =
      crocus_find_previous_compile(ice, info->stage, key->program_string_id);

   elk_debug_key_recompile(c, &ice->dbg, info->stage, old_key, key);
}

/**
 * Get the shader for the last enabled geometry stage.
 *
 * This stage is the one which will feed stream output and the rasterizer.
 */
static gl_shader_stage
last_vue_stage(struct crocus_context *ice)
{
   if (ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
      return MESA_SHADER_GEOMETRY;

   if (ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
      return MESA_SHADER_TESS_EVAL;

   return MESA_SHADER_VERTEX;
}

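/**
 * Compute the set of VUE slots a vertex shader variant must write,
 * including slots fixed function needs even when the shader itself
 * doesn't write them.
 */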
static GLbitfield64
crocus_vs_outputs_written(struct crocus_context *ice,
                          const struct elk_vs_prog_key *key,
                          GLbitfield64 user_varyings)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   const struct intel_device_info *devinfo = &screen->devinfo;
   GLbitfield64 outputs_written = user_varyings;

   if (devinfo->ver < 6) {

      if (key->copy_edgeflag)
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);

      /* Put dummy slots into the VUE for the SF to put the replaced
       * point sprite coords in.  We shouldn't need these dummy slots,
       * which take up precious URB space, but it would mean that the SF
       * doesn't get nice aligned pairs of input coords into output
       * coords, which would be a pain to handle.
       */
      for (unsigned i = 0; i < 8; i++) {
         if (key->point_coord_replace & (1 << i))
            outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
      }

      /* if back colors are written, allocate slots for front colors too */
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0);
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1);
   }

   /* In order for legacy clipping to work, we need to populate the clip
    * distance varying slots whenever clipping is enabled, even if the vertex
    * shader doesn't write to gl_ClipDistance.
    */
   if (key->nr_userclip_plane_consts > 0) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
   }

   return outputs_written;
}

/*
 * If no edgeflags come from the user, gen4/5
 * require giving the clip shader a default edgeflag.
 *
 * This will always be 1.0.
 */
static void
crocus_lower_default_edgeflags(struct nir_shader *nir)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(nir);

   nir_builder b = nir_builder_at(nir_after_impl(impl));

   nir_variable *var = nir_variable_create(nir, nir_var_shader_out,
                                           glsl_float_type(),
                                           "edgeflag");
   var->data.location = VARYING_SLOT_EDGE;
   nir_store_var(&b, var, nir_imm_float(&b, 1.0), 0x1);
}

/**
 * Compile a vertex shader, and upload the assembly.
 */
static struct crocus_compiled_shader *
crocus_compile_vs(struct crocus_context *ice,
                  struct crocus_uncompiled_shader *ish,
                  const struct elk_vs_prog_key *key)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   const struct elk_compiler *compiler = screen->compiler;
   const struct intel_device_info *devinfo = &screen->devinfo;
   void *mem_ctx = ralloc_context(NULL);
   struct elk_vs_prog_data *vs_prog_data =
      rzalloc(mem_ctx, struct elk_vs_prog_data);
   struct elk_vue_prog_data *vue_prog_data = &vs_prog_data->base;
   struct elk_stage_prog_data *prog_data = &vue_prog_data->base;
   enum elk_param_builtin *system_values;
   unsigned num_system_values;
   unsigned num_cbufs;

   nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);

   if (key->nr_userclip_plane_consts) {
      nir_function_impl *impl = nir_shader_get_entrypoint(nir);
      /* Check if variables were found. */
      if (nir_lower_clip_vs(nir, (1 << key->nr_userclip_plane_consts) - 1,
                            true, false, NULL)) {
         nir_lower_io_to_temporaries(nir, impl, true, false);
         nir_lower_global_vars_to_local(nir);
         nir_lower_vars_to_ssa(nir);
         nir_shader_gather_info(nir, impl);
      }
   }

   if (key->clamp_pointsize)
      nir_lower_point_size(nir, 1.0, 255.0);

   prog_data->use_alt_mode = nir->info.use_legacy_math_rules;

   crocus_setup_uniforms(devinfo, mem_ctx, nir, prog_data, &system_values,
                         &num_system_values, &num_cbufs);

   crocus_lower_swizzles(nir, &key->base.tex);

   if (devinfo->ver <= 5 &&
       !(nir->info.inputs_read & BITFIELD64_BIT(VERT_ATTRIB_EDGEFLAG)))
      crocus_lower_default_edgeflags(nir);

   struct crocus_binding_table bt;
   crocus_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
                              num_system_values, num_cbufs, &key->base.tex);

   if (can_push_ubo(devinfo))
      elk_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);

   uint64_t outputs_written =
      crocus_vs_outputs_written(ice, key, nir->info.outputs_written);
   elk_compute_vue_map(devinfo,
                       &vue_prog_data->vue_map, outputs_written,
                       nir->info.separate_shader, /* pos slots */ 1);

   /* Don't tell the backend about our clip plane constants, we've already
    * lowered them in NIR and we don't want it doing it again.
    */
   struct elk_vs_prog_key key_no_ucp = *key;
   key_no_ucp.nr_userclip_plane_consts = 0;
   key_no_ucp.copy_edgeflag = false;
   crocus_sanitize_tex_key(&key_no_ucp.base.tex);

   struct elk_compile_vs_params params = {
      .base = {
         .mem_ctx = mem_ctx,
         .nir = nir,
         .log_data = &ice->dbg,
      },
      .key = &key_no_ucp,
      .prog_data = vs_prog_data,
      .edgeflag_is_last = devinfo->ver < 6,
   };
   const unsigned *program =
      elk_compile_vs(compiler, &params);
   if (program == NULL) {
      dbg_printf("Failed to compile vertex shader: %s\n", params.base.error_str);
      ralloc_free(mem_ctx);
      return NULL;
   }

   if (ish->compiled_once) {
      crocus_debug_recompile(ice, &nir->info, &key->base);
   } else {
      ish->compiled_once = true;
   }

   uint32_t *so_decls = NULL;
   if (devinfo->ver > 6)
      so_decls = screen->vtbl.create_so_decl_list(&ish->stream_output,
                                                  &vue_prog_data->vue_map);

   struct crocus_compiled_shader *shader =
      crocus_upload_shader(ice, CROCUS_CACHE_VS, sizeof(*key), key, program,
                           prog_data->program_size,
                           prog_data, sizeof(*vs_prog_data), so_decls,
                           system_values, num_system_values,
                           num_cbufs, &bt);

   crocus_disk_cache_store(screen->disk_cache, ish, shader,
                           ice->shaders.cache_bo_map,
                           key, sizeof(*key));

   ralloc_free(mem_ctx);
   return shader;
}

/**
 * Update the current vertex shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
crocus_update_compiled_vs(struct crocus_context *ice)
{
   struct crocus_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
   struct crocus_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_VERTEX];
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   const struct intel_device_info *devinfo = &screen->devinfo;
   struct elk_vs_prog_key key = { KEY_INIT() };

   if (ish->nos & (1ull << CROCUS_NOS_TEXTURES))
      crocus_populate_sampler_prog_key_data(ice, devinfo, MESA_SHADER_VERTEX, ish,
                                            ish->nir->info.uses_texture_gather, &key.base.tex);
   screen->vtbl.populate_vs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);

   struct crocus_compiled_shader *old = ice->shaders.prog[CROCUS_CACHE_VS];
   struct crocus_compiled_shader *shader =
      crocus_find_cached_shader(ice, CROCUS_CACHE_VS, sizeof(key), &key);

   if (!shader)
      shader = crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key));

   if (!shader)
      shader = crocus_compile_vs(ice, ish, &key);

   if (old != shader) {
      ice->shaders.prog[CROCUS_CACHE_VS] = shader;
      if (devinfo->ver == 8)
         ice->state.dirty |= CROCUS_DIRTY_GEN8_VF_SGVS;
      ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_VS |
                                CROCUS_STAGE_DIRTY_BINDINGS_VS |
                                CROCUS_STAGE_DIRTY_CONSTANTS_VS;
      shs->sysvals_need_upload = true;

      const struct elk_vs_prog_data *vs_prog_data =
         (void *) shader->prog_data;
      const bool uses_draw_params = vs_prog_data->uses_firstvertex ||
                                    vs_prog_data->uses_baseinstance;
      const bool uses_derived_draw_params = vs_prog_data->uses_drawid ||
                                            vs_prog_data->uses_is_indexed_draw;
      const bool needs_sgvs_element = uses_draw_params ||
                                      vs_prog_data->uses_instanceid ||
                                      vs_prog_data->uses_vertexid;

      if (ice->state.vs_uses_draw_params != uses_draw_params ||
          ice->state.vs_uses_derived_draw_params != uses_derived_draw_params ||
          ice->state.vs_needs_edge_flag != ish->needs_edge_flag ||
          ice->state.vs_uses_vertexid != vs_prog_data->uses_vertexid ||
          ice->state.vs_uses_instanceid != vs_prog_data->uses_instanceid) {
         ice->state.dirty |= CROCUS_DIRTY_VERTEX_BUFFERS |
                             CROCUS_DIRTY_VERTEX_ELEMENTS;
      }
      ice->state.vs_uses_draw_params = uses_draw_params;
      ice->state.vs_uses_derived_draw_params = uses_derived_draw_params;
      ice->state.vs_needs_sgvs_element = needs_sgvs_element;
      ice->state.vs_needs_edge_flag = ish->needs_edge_flag;
      ice->state.vs_uses_vertexid = vs_prog_data->uses_vertexid;
      ice->state.vs_uses_instanceid = vs_prog_data->uses_instanceid;
   }
}

/**
 * Get the shader_info for a given stage, or NULL if the stage is disabled.
 */
const struct shader_info *
crocus_get_shader_info(const struct crocus_context *ice, gl_shader_stage stage)
{
   const struct crocus_uncompiled_shader *ish = ice->shaders.uncompiled[stage];

   if (!ish)
      return NULL;

   const nir_shader *nir = ish->nir;
   return &nir->info;
}

/**
 * Get the union of TCS output and TES input slots.
 *
 * TCS and TES need to agree on a common URB entry layout.  In particular,
 * the data for all patch vertices is stored in a single URB entry (unlike
 * GS which has one entry per input vertex).  This means that per-vertex
 * array indexing needs a stride.
 *
 * SSO requires locations to match, but doesn't require the number of
 * outputs/inputs to match (in fact, the TCS often has extra outputs).
 * So, we need to take the extra step of unifying these on the fly.
 */
static void
get_unified_tess_slots(const struct crocus_context *ice,
                       uint64_t *per_vertex_slots,
                       uint32_t *per_patch_slots)
{
   const struct shader_info *tcs =
      crocus_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
   const struct shader_info *tes =
      crocus_get_shader_info(ice, MESA_SHADER_TESS_EVAL);

   *per_vertex_slots = tes->inputs_read;
   *per_patch_slots = tes->patch_inputs_read;

   if (tcs) {
      *per_vertex_slots |= tcs->outputs_written;
      *per_patch_slots |= tcs->patch_outputs_written;
   }
}

/**
 * Compile a tessellation control shader, and upload the assembly.
 */
static struct crocus_compiled_shader *
crocus_compile_tcs(struct crocus_context *ice,
                   struct crocus_uncompiled_shader *ish,
                   const struct elk_tcs_prog_key *key)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   const struct elk_compiler *compiler = screen->compiler;
   void *mem_ctx = ralloc_context(NULL);
   struct elk_tcs_prog_data *tcs_prog_data =
      rzalloc(mem_ctx, struct elk_tcs_prog_data);
   struct elk_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
   struct elk_stage_prog_data *prog_data = &vue_prog_data->base;
   const struct intel_device_info *devinfo = &screen->devinfo;
   enum elk_param_builtin *system_values = NULL;
   unsigned num_system_values = 0;
   unsigned num_cbufs = 0;

   nir_shader *nir;

   struct crocus_binding_table bt;

   if (ish) {
      nir = nir_shader_clone(mem_ctx, ish->nir);
   } else {
      nir = elk_nir_create_passthrough_tcs(mem_ctx, compiler, key);
   }

   crocus_setup_uniforms(devinfo, mem_ctx, nir, prog_data, &system_values,
                         &num_system_values, &num_cbufs);

   crocus_lower_swizzles(nir, &key->base.tex);
   crocus_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
                              num_system_values, num_cbufs, &key->base.tex);
   if (can_push_ubo(devinfo))
      elk_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);

   struct elk_tcs_prog_key key_clean = *key;
   crocus_sanitize_tex_key(&key_clean.base.tex);

   struct elk_compile_tcs_params params = {
      .base = {
         .mem_ctx = mem_ctx,
         .nir = nir,
         .log_data = &ice->dbg,
      },
      .key = &key_clean,
      .prog_data = tcs_prog_data,
   };

   const unsigned *program = elk_compile_tcs(compiler, &params);
   if (program == NULL) {
      dbg_printf("Failed to compile control shader: %s\n", params.base.error_str);
      ralloc_free(mem_ctx);
      return NULL;
   }

   if (ish) {
      if (ish->compiled_once) {
         crocus_debug_recompile(ice, &nir->info, &key->base);
      } else {
         ish->compiled_once = true;
      }
   }

   struct crocus_compiled_shader *shader =
      crocus_upload_shader(ice, CROCUS_CACHE_TCS, sizeof(*key), key, program,
                           prog_data->program_size,
                           prog_data, sizeof(*tcs_prog_data), NULL,
                           system_values, num_system_values,
                           num_cbufs, &bt);

   if (ish)
      crocus_disk_cache_store(screen->disk_cache, ish, shader,
                              ice->shaders.cache_bo_map,
                              key, sizeof(*key));

   ralloc_free(mem_ctx);
   return shader;
}

/**
 * Update the current tessellation control shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
crocus_update_compiled_tcs(struct crocus_context *ice)
{
   struct crocus_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
   struct crocus_uncompiled_shader *tcs =
      ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL];
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   const struct intel_device_info *devinfo = &screen->devinfo;

   const struct shader_info *tes_info =
      crocus_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
   struct elk_tcs_prog_key key = {
      KEY_INIT_NO_ID(),
      .base.program_string_id = tcs ? tcs->program_id : 0,
      ._tes_primitive_mode = tes_info->tess._primitive_mode,
      .input_vertices = ice->state.vertices_per_patch,
      .quads_workaround = tes_info->tess._primitive_mode == TESS_PRIMITIVE_QUADS &&
                          tes_info->tess.spacing == TESS_SPACING_EQUAL,
   };

   if (tcs && tcs->nos & (1ull << CROCUS_NOS_TEXTURES))
      crocus_populate_sampler_prog_key_data(ice, devinfo, MESA_SHADER_TESS_CTRL, tcs,
                                            tcs->nir->info.uses_texture_gather, &key.base.tex);
   get_unified_tess_slots(ice, &key.outputs_written,
                          &key.patch_outputs_written);
   screen->vtbl.populate_tcs_key(ice, &key);

   struct crocus_compiled_shader *old = ice->shaders.prog[CROCUS_CACHE_TCS];
   struct crocus_compiled_shader *shader =
      crocus_find_cached_shader(ice, CROCUS_CACHE_TCS, sizeof(key), &key);

   if (tcs && !shader)
      shader = crocus_disk_cache_retrieve(ice, tcs, &key, sizeof(key));

   if (!shader)
      shader = crocus_compile_tcs(ice, tcs, &key);

   if (old != shader) {
      ice->shaders.prog[CROCUS_CACHE_TCS] = shader;
      ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_TCS |
                                CROCUS_STAGE_DIRTY_BINDINGS_TCS |
                                CROCUS_STAGE_DIRTY_CONSTANTS_TCS;
      shs->sysvals_need_upload = true;
   }
}

1510 /**
1511 * Compile a tessellation evaluation shader, and upload the assembly.
1512 */
1513 static struct crocus_compiled_shader *
crocus_compile_tes(struct crocus_context * ice,struct crocus_uncompiled_shader * ish,const struct elk_tes_prog_key * key)1514 crocus_compile_tes(struct crocus_context *ice,
1515 struct crocus_uncompiled_shader *ish,
1516 const struct elk_tes_prog_key *key)
1517 {
1518 struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
1519 const struct elk_compiler *compiler = screen->compiler;
1520 void *mem_ctx = ralloc_context(NULL);
1521 struct elk_tes_prog_data *tes_prog_data =
1522 rzalloc(mem_ctx, struct elk_tes_prog_data);
1523 struct elk_vue_prog_data *vue_prog_data = &tes_prog_data->base;
1524 struct elk_stage_prog_data *prog_data = &vue_prog_data->base;
1525 enum elk_param_builtin *system_values;
1526 const struct intel_device_info *devinfo = &screen->devinfo;
1527 unsigned num_system_values;
1528 unsigned num_cbufs;
1529
1530 nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
1531
1532 if (key->nr_userclip_plane_consts) {
1533 nir_function_impl *impl = nir_shader_get_entrypoint(nir);
1534 nir_lower_clip_vs(nir, (1 << key->nr_userclip_plane_consts) - 1, true,
1535 false, NULL);
1536 nir_lower_io_to_temporaries(nir, impl, true, false);
1537 nir_lower_global_vars_to_local(nir);
1538 nir_lower_vars_to_ssa(nir);
1539 nir_shader_gather_info(nir, impl);
1540 }
1541
1542 if (key->clamp_pointsize)
1543 nir_lower_point_size(nir, 1.0, 255.0);
1544
1545 crocus_setup_uniforms(devinfo, mem_ctx, nir, prog_data, &system_values,
1546 &num_system_values, &num_cbufs);
1547 crocus_lower_swizzles(nir, &key->base.tex);
1548 struct crocus_binding_table bt;
1549 crocus_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
1550 num_system_values, num_cbufs, &key->base.tex);
1551
1552 if (can_push_ubo(devinfo))
1553 elk_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);
1554
   struct intel_vue_map input_vue_map;
   elk_compute_tess_vue_map(&input_vue_map, key->inputs_read,
                            key->patch_inputs_read);

   struct elk_tes_prog_key key_clean = *key;
   crocus_sanitize_tex_key(&key_clean.base.tex);

   struct elk_compile_tes_params params = {
      .base = {
         .mem_ctx = mem_ctx,
         .nir = nir,
         .log_data = &ice->dbg,
      },
      .key = &key_clean,
      .prog_data = tes_prog_data,
      .input_vue_map = &input_vue_map,
   };

   const unsigned *program = elk_compile_tes(compiler, &params);
   if (program == NULL) {
      dbg_printf("Failed to compile tessellation evaluation shader: %s\n",
                 params.base.error_str);
      ralloc_free(mem_ctx);
      return NULL;
   }

   if (ish->compiled_once) {
      crocus_debug_recompile(ice, &nir->info, &key->base);
   } else {
      ish->compiled_once = true;
   }

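   /* Gen7+ programs streamout via SO_DECL lists built from the VUE map. */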
   uint32_t *so_decls = NULL;
   if (devinfo->ver > 6)
      so_decls = screen->vtbl.create_so_decl_list(&ish->stream_output,
                                                  &vue_prog_data->vue_map);

   struct crocus_compiled_shader *shader =
      crocus_upload_shader(ice, CROCUS_CACHE_TES, sizeof(*key), key, program,
                           prog_data->program_size,
                           prog_data, sizeof(*tes_prog_data), so_decls,
                           system_values, num_system_values,
                           num_cbufs, &bt);

   crocus_disk_cache_store(screen->disk_cache, ish, shader,
                           ice->shaders.cache_bo_map,
                           key, sizeof(*key));

   ralloc_free(mem_ctx);
   return shader;
}

/**
 * Update the current tessellation evaluation shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
crocus_update_compiled_tes(struct crocus_context *ice)
{
   struct crocus_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
   struct crocus_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   struct elk_tes_prog_key key = { KEY_INIT() };
   const struct intel_device_info *devinfo = &screen->devinfo;

   if (ish->nos & (1ull << CROCUS_NOS_TEXTURES))
      crocus_populate_sampler_prog_key_data(ice, devinfo, MESA_SHADER_TESS_EVAL, ish,
                                            ish->nir->info.uses_texture_gather, &key.base.tex);
   get_unified_tess_slots(ice, &key.inputs_read, &key.patch_inputs_read);
   screen->vtbl.populate_tes_key(ice, &ish->nir->info, last_vue_stage(ice), &key);

   struct crocus_compiled_shader *old = ice->shaders.prog[CROCUS_CACHE_TES];
   struct crocus_compiled_shader *shader =
      crocus_find_cached_shader(ice, CROCUS_CACHE_TES, sizeof(key), &key);

   if (!shader)
      shader = crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key));

   if (!shader)
      shader = crocus_compile_tes(ice, ish, &key);

   if (old != shader) {
      ice->shaders.prog[CROCUS_CACHE_TES] = shader;
      ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_TES |
                                CROCUS_STAGE_DIRTY_BINDINGS_TES |
                                CROCUS_STAGE_DIRTY_CONSTANTS_TES;
      shs->sysvals_need_upload = true;
   }

   /* TODO: Could compare and avoid flagging this. */
   const struct shader_info *tes_info = &ish->nir->info;
   if (BITSET_TEST(tes_info->system_values_read, SYSTEM_VALUE_VERTICES_IN)) {
      ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_CONSTANTS_TES;
      ice->state.shaders[MESA_SHADER_TESS_EVAL].sysvals_need_upload = true;
   }
}

/**
 * Compile a geometry shader, and upload the assembly.
 */
static struct crocus_compiled_shader *
crocus_compile_gs(struct crocus_context *ice,
                  struct crocus_uncompiled_shader *ish,
                  const struct elk_gs_prog_key *key)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   const struct elk_compiler *compiler = screen->compiler;
   const struct intel_device_info *devinfo = &screen->devinfo;
   void *mem_ctx = ralloc_context(NULL);
   struct elk_gs_prog_data *gs_prog_data =
      rzalloc(mem_ctx, struct elk_gs_prog_data);
   struct elk_vue_prog_data *vue_prog_data = &gs_prog_data->base;
   struct elk_stage_prog_data *prog_data = &vue_prog_data->base;
   enum elk_param_builtin *system_values;
   unsigned num_system_values;
   unsigned num_cbufs;

   nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);

   if (key->nr_userclip_plane_consts) {
      nir_function_impl *impl = nir_shader_get_entrypoint(nir);
      nir_lower_clip_gs(nir, (1 << key->nr_userclip_plane_consts) - 1, false,
                        NULL);
      nir_lower_io_to_temporaries(nir, impl, true, false);
      nir_lower_global_vars_to_local(nir);
      nir_lower_vars_to_ssa(nir);
      nir_shader_gather_info(nir, impl);
   }

   if (key->clamp_pointsize)
      nir_lower_point_size(nir, 1.0, 255.0);

   crocus_setup_uniforms(devinfo, mem_ctx, nir, prog_data, &system_values,
                         &num_system_values, &num_cbufs);
   crocus_lower_swizzles(nir, &key->base.tex);
   struct crocus_binding_table bt;
   crocus_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
                              num_system_values, num_cbufs, &key->base.tex);

   if (can_push_ubo(devinfo))
      elk_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);

   elk_compute_vue_map(devinfo,
                       &vue_prog_data->vue_map, nir->info.outputs_written,
                       nir->info.separate_shader, /* pos slots */ 1);

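   /* Gen6 implements transform feedback in the GS, so bake the stream
    * output layout into the GS program data up front. */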
   if (devinfo->ver == 6)
      gfx6_gs_xfb_setup(&ish->stream_output, gs_prog_data);

   struct elk_gs_prog_key key_clean = *key;
   crocus_sanitize_tex_key(&key_clean.base.tex);

   struct elk_compile_gs_params params = {
      .base = {
         .mem_ctx = mem_ctx,
         .nir = nir,
         .log_data = &ice->dbg,
      },
      .key = &key_clean,
      .prog_data = gs_prog_data,
   };

   const unsigned *program = elk_compile_gs(compiler, &params);
   if (program == NULL) {
      dbg_printf("Failed to compile geometry shader: %s\n", params.base.error_str);
      ralloc_free(mem_ctx);
      return NULL;
   }

   if (ish->compiled_once) {
      crocus_debug_recompile(ice, &nir->info, &key->base);
   } else {
      ish->compiled_once = true;
   }

   uint32_t *so_decls = NULL;
   if (devinfo->ver > 6)
      so_decls = screen->vtbl.create_so_decl_list(&ish->stream_output,
                                                  &vue_prog_data->vue_map);

   struct crocus_compiled_shader *shader =
      crocus_upload_shader(ice, CROCUS_CACHE_GS, sizeof(*key), key, program,
                           prog_data->program_size,
                           prog_data, sizeof(*gs_prog_data), so_decls,
                           system_values, num_system_values,
                           num_cbufs, &bt);

   crocus_disk_cache_store(screen->disk_cache, ish, shader,
                           ice->shaders.cache_bo_map,
                           key, sizeof(*key));

   ralloc_free(mem_ctx);
   return shader;
}

/**
 * Update the current geometry shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
crocus_update_compiled_gs(struct crocus_context *ice)
{
   struct crocus_shader_state *shs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
   struct crocus_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_GEOMETRY];
   struct crocus_compiled_shader *old = ice->shaders.prog[CROCUS_CACHE_GS];
   struct crocus_compiled_shader *shader = NULL;

   if (ish) {
      struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
      const struct intel_device_info *devinfo = &screen->devinfo;
      struct elk_gs_prog_key key = { KEY_INIT() };

      if (ish->nos & (1ull << CROCUS_NOS_TEXTURES))
         crocus_populate_sampler_prog_key_data(ice, devinfo, MESA_SHADER_GEOMETRY, ish,
                                               ish->nir->info.uses_texture_gather, &key.base.tex);
      screen->vtbl.populate_gs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);

      shader =
         crocus_find_cached_shader(ice, CROCUS_CACHE_GS, sizeof(key), &key);

      if (!shader)
         shader = crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key));

      if (!shader)
         shader = crocus_compile_gs(ice, ish, &key);
   }

   if (old != shader) {
      ice->shaders.prog[CROCUS_CACHE_GS] = shader;
      ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_GS |
                                CROCUS_STAGE_DIRTY_BINDINGS_GS |
                                CROCUS_STAGE_DIRTY_CONSTANTS_GS;
      shs->sysvals_need_upload = true;
   }
}

/**
 * Compile a fragment (pixel) shader, and upload the assembly.
 */
static struct crocus_compiled_shader *
crocus_compile_fs(struct crocus_context *ice,
                  struct crocus_uncompiled_shader *ish,
                  const struct elk_wm_prog_key *key,
                  struct intel_vue_map *vue_map)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   const struct elk_compiler *compiler = screen->compiler;
   void *mem_ctx = ralloc_context(NULL);
   struct elk_wm_prog_data *fs_prog_data =
      rzalloc(mem_ctx, struct elk_wm_prog_data);
   struct elk_stage_prog_data *prog_data = &fs_prog_data->base;
   enum elk_param_builtin *system_values;
   const struct intel_device_info *devinfo = &screen->devinfo;
   unsigned num_system_values;
   unsigned num_cbufs;

   nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);

   prog_data->use_alt_mode = nir->info.use_legacy_math_rules;

   crocus_setup_uniforms(devinfo, mem_ctx, nir, prog_data, &system_values,
                         &num_system_values, &num_cbufs);

   /* Lower output variables to load_output intrinsics before setting up
    * binding tables, so crocus_setup_binding_table can map any load_output
    * intrinsics to CROCUS_SURFACE_GROUP_RENDER_TARGET_READ on Gen8 for
    * non-coherent framebuffer fetches.
    */
   elk_nir_lower_fs_outputs(nir);

   /* lower swizzles before binding table */
   crocus_lower_swizzles(nir, &key->base.tex);
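   /* Always reserve at least one render-target binding so the FS has a
    * valid (null) surface to write when no color buffers are bound. */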
   int null_rts = 1;

   struct crocus_binding_table bt;
   crocus_setup_binding_table(devinfo, nir, &bt,
                              MAX2(key->nr_color_regions, null_rts),
                              num_system_values, num_cbufs,
                              &key->base.tex);

   if (can_push_ubo(devinfo))
      elk_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);

   struct elk_wm_prog_key key_clean = *key;
   crocus_sanitize_tex_key(&key_clean.base.tex);

   struct elk_compile_fs_params params = {
      .base = {
         .mem_ctx = mem_ctx,
         .nir = nir,
         .log_data = &ice->dbg,
      },
      .key = &key_clean,
      .prog_data = fs_prog_data,

      .allow_spilling = true,
      .max_polygons = 1,
      .vue_map = vue_map,
   };

   const unsigned *program = elk_compile_fs(compiler, &params);
   if (program == NULL) {
      dbg_printf("Failed to compile fragment shader: %s\n", params.base.error_str);
      ralloc_free(mem_ctx);
      return NULL;
   }

   if (ish->compiled_once) {
      crocus_debug_recompile(ice, &nir->info, &key->base);
   } else {
      ish->compiled_once = true;
   }

   struct crocus_compiled_shader *shader =
      crocus_upload_shader(ice, CROCUS_CACHE_FS, sizeof(*key), key, program,
                           prog_data->program_size,
                           prog_data, sizeof(*fs_prog_data), NULL,
                           system_values, num_system_values,
                           num_cbufs, &bt);

   crocus_disk_cache_store(screen->disk_cache, ish, shader,
                           ice->shaders.cache_bo_map,
                           key, sizeof(*key));

   ralloc_free(mem_ctx);
   return shader;
}

/**
 * Update the current fragment shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
crocus_update_compiled_fs(struct crocus_context *ice)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   const struct intel_device_info *devinfo = &screen->devinfo;
   struct crocus_shader_state *shs = &ice->state.shaders[MESA_SHADER_FRAGMENT];
   struct crocus_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
   struct elk_wm_prog_key key = { KEY_INIT() };

   if (ish->nos & (1ull << CROCUS_NOS_TEXTURES))
      crocus_populate_sampler_prog_key_data(ice, devinfo, MESA_SHADER_FRAGMENT, ish,
                                            ish->nir->info.uses_texture_gather, &key.base.tex);
   screen->vtbl.populate_fs_key(ice, &ish->nir->info, &key);

   if (ish->nos & (1ull << CROCUS_NOS_LAST_VUE_MAP))
      key.input_slots_valid = ice->shaders.last_vue_map->slots_valid;

   struct crocus_compiled_shader *old = ice->shaders.prog[CROCUS_CACHE_FS];
   struct crocus_compiled_shader *shader =
      crocus_find_cached_shader(ice, CROCUS_CACHE_FS, sizeof(key), &key);

   if (!shader)
      shader = crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key));

   if (!shader)
      shader = crocus_compile_fs(ice, ish, &key, ice->shaders.last_vue_map);

   if (old != shader) {
      // XXX: only need to flag CLIP if barycentric has NONPERSPECTIVE
      // toggles. might be able to avoid flagging SBE too.
      ice->shaders.prog[CROCUS_CACHE_FS] = shader;
      ice->state.dirty |= CROCUS_DIRTY_WM;
      /* gen4 clip/sf rely on fs prog_data */
      if (devinfo->ver < 6)
         ice->state.dirty |= CROCUS_DIRTY_GEN4_CLIP_PROG | CROCUS_DIRTY_GEN4_SF_PROG;
      else
         ice->state.dirty |= CROCUS_DIRTY_CLIP | CROCUS_DIRTY_GEN6_BLEND_STATE;
      if (devinfo->ver == 6)
         ice->state.dirty |= CROCUS_DIRTY_RASTER;
      if (devinfo->ver >= 7)
         ice->state.dirty |= CROCUS_DIRTY_GEN7_SBE;
      ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_FS |
                                CROCUS_STAGE_DIRTY_BINDINGS_FS |
                                CROCUS_STAGE_DIRTY_CONSTANTS_FS;
      shs->sysvals_need_upload = true;
   }
}

/**
 * Update the last enabled stage's VUE map.
 *
 * When the shader feeding the rasterizer's output interface changes, we
 * need to re-emit various packets.
 */
static void
update_last_vue_map(struct crocus_context *ice,
                    struct elk_stage_prog_data *prog_data)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   const struct intel_device_info *devinfo = &screen->devinfo;
   struct elk_vue_prog_data *vue_prog_data = (void *) prog_data;
   struct intel_vue_map *vue_map = &vue_prog_data->vue_map;
   struct intel_vue_map *old_map = ice->shaders.last_vue_map;
   const uint64_t changed_slots =
      (old_map ? old_map->slots_valid : 0ull) ^ vue_map->slots_valid;

   if (changed_slots & VARYING_BIT_VIEWPORT) {
      ice->state.num_viewports =
         (vue_map->slots_valid & VARYING_BIT_VIEWPORT) ? CROCUS_MAX_VIEWPORTS : 1;
      ice->state.dirty |= CROCUS_DIRTY_SF_CL_VIEWPORT |
                          CROCUS_DIRTY_CC_VIEWPORT;
      if (devinfo->ver < 6)
         ice->state.dirty |= CROCUS_DIRTY_GEN4_CLIP_PROG | CROCUS_DIRTY_GEN4_SF_PROG;

      if (devinfo->ver <= 6)
         ice->state.dirty |= CROCUS_DIRTY_GEN4_FF_GS_PROG;

      if (devinfo->ver >= 6)
         ice->state.dirty |= CROCUS_DIRTY_CLIP |
                             CROCUS_DIRTY_GEN6_SCISSOR_RECT;
      ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_UNCOMPILED_FS |
                                ice->state.stage_dirty_for_nos[CROCUS_NOS_LAST_VUE_MAP];
   }

   if (changed_slots || (old_map && old_map->separate != vue_map->separate)) {
      ice->state.dirty |= CROCUS_DIRTY_GEN7_SBE;
      if (devinfo->ver < 6)
         ice->state.dirty |= CROCUS_DIRTY_GEN4_FF_GS_PROG;
      ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_UNCOMPILED_FS;
   }

   ice->shaders.last_vue_map = &vue_prog_data->vue_map;
}

static void
crocus_update_pull_constant_descriptors(struct crocus_context *ice,
                                        gl_shader_stage stage)
{
   struct crocus_compiled_shader *shader = ice->shaders.prog[stage];

   if (!shader || !shader->prog_data->has_ubo_pull)
      return;

   struct crocus_shader_state *shs = &ice->state.shaders[stage];
   bool any_new_descriptors =
      shader->num_system_values > 0 && shs->sysvals_need_upload;

   unsigned bound_cbufs = shs->bound_cbufs;

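   /* Any bound constant buffer backed by an actual resource may need its
    * pull descriptor re-uploaded, so flag the stage's bindings as dirty. */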
   while (bound_cbufs) {
      const int i = u_bit_scan(&bound_cbufs);
      struct pipe_constant_buffer *cbuf = &shs->constbufs[i];
      if (cbuf->buffer) {
         any_new_descriptors = true;
      }
   }

   if (any_new_descriptors)
      ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_BINDINGS_VS << stage;
}

/**
 * Get the prog_data for a given stage, or NULL if the stage is disabled.
 */
static struct elk_vue_prog_data *
get_vue_prog_data(struct crocus_context *ice, gl_shader_stage stage)
{
   if (!ice->shaders.prog[stage])
      return NULL;

   return (void *) ice->shaders.prog[stage]->prog_data;
}

static struct crocus_compiled_shader *
crocus_compile_clip(struct crocus_context *ice, struct elk_clip_prog_key *key)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   const struct elk_compiler *compiler = screen->compiler;
   void *mem_ctx;
   unsigned program_size;
   mem_ctx = ralloc_context(NULL);

   struct elk_clip_prog_data *clip_prog_data =
      rzalloc(mem_ctx, struct elk_clip_prog_data);

   const unsigned *program = elk_compile_clip(compiler, mem_ctx, key, clip_prog_data,
                                              ice->shaders.last_vue_map, &program_size);

   if (program == NULL) {
      dbg_printf("failed to compile clip shader\n");
      ralloc_free(mem_ctx);
      return NULL;
   }

   struct crocus_binding_table bt;
   memset(&bt, 0, sizeof(bt));

   struct crocus_compiled_shader *shader =
      crocus_upload_shader(ice, CROCUS_CACHE_CLIP, sizeof(*key), key, program,
                           program_size,
                           (struct elk_stage_prog_data *)clip_prog_data, sizeof(*clip_prog_data),
                           NULL, NULL, 0, 0, &bt);
   ralloc_free(mem_ctx);
   return shader;
}

static void
crocus_update_compiled_clip(struct crocus_context *ice)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   struct elk_clip_prog_key key;
   struct crocus_compiled_shader *old = ice->shaders.clip_prog;
   memset(&key, 0, sizeof(key));

   const struct elk_wm_prog_data *wm_prog_data =
      elk_wm_prog_data(ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data);
   if (wm_prog_data) {
      key.contains_flat_varying = wm_prog_data->contains_flat_varying;
      key.contains_noperspective_varying =
         wm_prog_data->contains_noperspective_varying;
      memcpy(key.interp_mode, wm_prog_data->interp_mode, sizeof(key.interp_mode));
   }

   key.primitive = ice->state.reduced_prim_mode;
   key.attrs = ice->shaders.last_vue_map->slots_valid;

   struct pipe_rasterizer_state *rs_state = crocus_get_rast_state(ice);
   key.pv_first = rs_state->flatshade_first;

   if (rs_state->clip_plane_enable)
      key.nr_userclip = util_logbase2(rs_state->clip_plane_enable) + 1;

   if (screen->devinfo.ver == 5)
      key.clip_mode = ELK_CLIP_MODE_KERNEL_CLIP;
   else
      key.clip_mode = ELK_CLIP_MODE_NORMAL;

   if (key.primitive == MESA_PRIM_TRIANGLES) {
      if (rs_state->cull_face == PIPE_FACE_FRONT_AND_BACK)
         key.clip_mode = ELK_CLIP_MODE_REJECT_ALL;
      else {
         uint32_t fill_front = ELK_CLIP_FILL_MODE_CULL;
         uint32_t fill_back = ELK_CLIP_FILL_MODE_CULL;
         uint32_t offset_front = 0;
         uint32_t offset_back = 0;

         if (!(rs_state->cull_face & PIPE_FACE_FRONT)) {
            switch (rs_state->fill_front) {
            case PIPE_POLYGON_MODE_FILL:
               fill_front = ELK_CLIP_FILL_MODE_FILL;
               offset_front = 0;
               break;
            case PIPE_POLYGON_MODE_LINE:
               fill_front = ELK_CLIP_FILL_MODE_LINE;
               offset_front = rs_state->offset_line;
               break;
            case PIPE_POLYGON_MODE_POINT:
               fill_front = ELK_CLIP_FILL_MODE_POINT;
               offset_front = rs_state->offset_point;
               break;
            }
         }

         if (!(rs_state->cull_face & PIPE_FACE_BACK)) {
            switch (rs_state->fill_back) {
            case PIPE_POLYGON_MODE_FILL:
               fill_back = ELK_CLIP_FILL_MODE_FILL;
               offset_back = 0;
               break;
            case PIPE_POLYGON_MODE_LINE:
               fill_back = ELK_CLIP_FILL_MODE_LINE;
               offset_back = rs_state->offset_line;
               break;
            case PIPE_POLYGON_MODE_POINT:
               fill_back = ELK_CLIP_FILL_MODE_POINT;
               offset_back = rs_state->offset_point;
               break;
            }
         }

         if (rs_state->fill_back != PIPE_POLYGON_MODE_FILL ||
             rs_state->fill_front != PIPE_POLYGON_MODE_FILL) {
            key.do_unfilled = 1;

            /* Most cases the fixed function units will handle.  Cases where
             * one or more polygon faces are unfilled will require help:
             */
            key.clip_mode = ELK_CLIP_MODE_CLIP_NON_REJECTED;

            if (offset_back || offset_front) {
               double mrd = 0.0;
               if (ice->state.framebuffer.zsbuf)
                  mrd = util_get_depth_format_mrd(util_format_description(ice->state.framebuffer.zsbuf->format));
               key.offset_units = rs_state->offset_units * mrd * 2;
               key.offset_factor = rs_state->offset_scale * mrd;
               key.offset_clamp = rs_state->offset_clamp * mrd;
            }

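            /* Map the front/back fill state onto the hardware's cw/ccw
             * triangle orientations; which is which depends on the winding
             * order and the bottom-edge rule (flipped-Y rendering). */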
            if (!(rs_state->front_ccw ^ rs_state->bottom_edge_rule)) {
               key.fill_ccw = fill_front;
               key.fill_cw = fill_back;
               key.offset_ccw = offset_front;
               key.offset_cw = offset_back;
               if (rs_state->light_twoside &&
                   key.fill_cw != ELK_CLIP_FILL_MODE_CULL)
                  key.copy_bfc_cw = 1;
            } else {
               key.fill_cw = fill_front;
               key.fill_ccw = fill_back;
               key.offset_cw = offset_front;
               key.offset_ccw = offset_back;
               if (rs_state->light_twoside &&
                   key.fill_ccw != ELK_CLIP_FILL_MODE_CULL)
                  key.copy_bfc_ccw = 1;
            }
         }
      }
   }

   struct crocus_compiled_shader *shader =
      crocus_find_cached_shader(ice, CROCUS_CACHE_CLIP, sizeof(key), &key);

   if (!shader)
      shader = crocus_compile_clip(ice, &key);

   if (old != shader) {
      ice->state.dirty |= CROCUS_DIRTY_CLIP;
      ice->shaders.clip_prog = shader;
   }
}

static struct crocus_compiled_shader *
crocus_compile_sf(struct crocus_context *ice, struct elk_sf_prog_key *key)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   const struct elk_compiler *compiler = screen->compiler;
   void *mem_ctx;
   unsigned program_size;
   mem_ctx = ralloc_context(NULL);

   struct elk_sf_prog_data *sf_prog_data =
      rzalloc(mem_ctx, struct elk_sf_prog_data);

   const unsigned *program = elk_compile_sf(compiler, mem_ctx, key, sf_prog_data,
                                            ice->shaders.last_vue_map, &program_size);

   if (program == NULL) {
      dbg_printf("failed to compile sf shader\n");
      ralloc_free(mem_ctx);
      return NULL;
   }

   struct crocus_binding_table bt;
   memset(&bt, 0, sizeof(bt));

   struct crocus_compiled_shader *shader =
      crocus_upload_shader(ice, CROCUS_CACHE_SF, sizeof(*key), key, program,
                           program_size,
                           (struct elk_stage_prog_data *)sf_prog_data, sizeof(*sf_prog_data),
                           NULL, NULL, 0, 0, &bt);
   ralloc_free(mem_ctx);
   return shader;
}

static void
crocus_update_compiled_sf(struct crocus_context *ice)
{
   struct elk_sf_prog_key key;
   struct crocus_compiled_shader *old = ice->shaders.sf_prog;
   memset(&key, 0, sizeof(key));

   key.attrs = ice->shaders.last_vue_map->slots_valid;

   switch (ice->state.reduced_prim_mode) {
   case MESA_PRIM_TRIANGLES:
   default:
      if (key.attrs & BITFIELD64_BIT(VARYING_SLOT_EDGE))
         key.primitive = ELK_SF_PRIM_UNFILLED_TRIS;
      else
         key.primitive = ELK_SF_PRIM_TRIANGLES;
      break;
   case MESA_PRIM_LINES:
      key.primitive = ELK_SF_PRIM_LINES;
      break;
   case MESA_PRIM_POINTS:
      key.primitive = ELK_SF_PRIM_POINTS;
      break;
   }

   struct pipe_rasterizer_state *rs_state = crocus_get_rast_state(ice);
   key.userclip_active = rs_state->clip_plane_enable != 0;
   const struct elk_wm_prog_data *wm_prog_data =
      elk_wm_prog_data(ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data);
   if (wm_prog_data) {
      key.contains_flat_varying = wm_prog_data->contains_flat_varying;
      memcpy(key.interp_mode, wm_prog_data->interp_mode, sizeof(key.interp_mode));
   }

   key.do_twoside_color = rs_state->light_twoside;

   key.do_point_sprite = rs_state->point_quad_rasterization;
   if (key.do_point_sprite) {
      key.point_sprite_coord_replace = rs_state->sprite_coord_enable & 0xff;
      if (rs_state->sprite_coord_enable & (1 << 8))
         key.do_point_coord = 1;
      if (wm_prog_data && wm_prog_data->urb_setup[VARYING_SLOT_PNTC] != -1)
         key.do_point_coord = 1;
   }

   key.sprite_origin_lower_left =
      rs_state->sprite_coord_mode == PIPE_SPRITE_COORD_LOWER_LEFT;

   if (key.do_twoside_color) {
      key.frontface_ccw = rs_state->front_ccw;
   }

   struct crocus_compiled_shader *shader =
      crocus_find_cached_shader(ice, CROCUS_CACHE_SF, sizeof(key), &key);

   if (!shader)
      shader = crocus_compile_sf(ice, &key);

   if (old != shader) {
      ice->state.dirty |= CROCUS_DIRTY_RASTER;
      ice->shaders.sf_prog = shader;
   }
}

static struct crocus_compiled_shader *
crocus_compile_ff_gs(struct crocus_context *ice, struct elk_ff_gs_prog_key *key)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   struct elk_compiler *compiler = screen->compiler;
   void *mem_ctx;
   unsigned program_size;
   mem_ctx = ralloc_context(NULL);

   struct elk_ff_gs_prog_data *ff_gs_prog_data =
      rzalloc(mem_ctx, struct elk_ff_gs_prog_data);

   const unsigned *program = elk_compile_ff_gs_prog(compiler, mem_ctx, key, ff_gs_prog_data,
                                                    ice->shaders.last_vue_map, &program_size);

   if (program == NULL) {
      dbg_printf("failed to compile ff gs shader\n");
      ralloc_free(mem_ctx);
      return NULL;
   }

   struct crocus_binding_table bt;
   memset(&bt, 0, sizeof(bt));

   if (screen->devinfo.ver == 6) {
      bt.sizes[CROCUS_SURFACE_GROUP_SOL] = ELK_MAX_SOL_BINDINGS;
      bt.used_mask[CROCUS_SURFACE_GROUP_SOL] = (uint64_t)-1;

      bt.size_bytes = ELK_MAX_SOL_BINDINGS * 4;
   }

   struct crocus_compiled_shader *shader =
      crocus_upload_shader(ice, CROCUS_CACHE_FF_GS, sizeof(*key), key, program,
                           program_size,
                           (struct elk_stage_prog_data *)ff_gs_prog_data, sizeof(*ff_gs_prog_data),
                           NULL, NULL, 0, 0, &bt);
   ralloc_free(mem_ctx);
   return shader;
}

static void
crocus_update_compiled_ff_gs(struct crocus_context *ice)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   const struct intel_device_info *devinfo = &screen->devinfo;
   struct elk_ff_gs_prog_key key;
   struct crocus_compiled_shader *old = ice->shaders.ff_gs_prog;
   memset(&key, 0, sizeof(key));

   assert(devinfo->ver < 7);

   key.attrs = ice->shaders.last_vue_map->slots_valid;

   key.primitive = screen->vtbl.translate_prim_type(ice->state.prim_mode, 0);

   struct pipe_rasterizer_state *rs_state = crocus_get_rast_state(ice);
   key.pv_first = rs_state->flatshade_first;

   if (key.primitive == _3DPRIM_QUADLIST && !rs_state->flatshade) {
      /* Provide consistent primitive order with elk_set_prim's
       * optimization of single quads to trifans.
       */
      key.pv_first = true;
   }

   if (devinfo->ver >= 6) {
      key.need_gs_prog = ice->state.streamout_active;
      if (key.need_gs_prog) {
         struct crocus_uncompiled_shader *vs =
            ice->shaders.uncompiled[MESA_SHADER_VERTEX];
         gfx6_ff_gs_xfb_setup(&vs->stream_output,
                              &key);
      }
   } else {
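      /* Gen4/5 can't rasterize quads, quad strips, or line loops directly;
       * the fixed-function GS kernel decomposes them into supported
       * primitives. */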
      key.need_gs_prog = (key.primitive == _3DPRIM_QUADLIST ||
                          key.primitive == _3DPRIM_QUADSTRIP ||
                          key.primitive == _3DPRIM_LINELOOP);
   }

   struct crocus_compiled_shader *shader = NULL;
   if (key.need_gs_prog) {
      shader = crocus_find_cached_shader(ice, CROCUS_CACHE_FF_GS,
                                         sizeof(key), &key);
      if (!shader)
         shader = crocus_compile_ff_gs(ice, &key);
   }

   if (old != shader) {
      ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_GS;
      if (!!old != !!shader)
         ice->state.dirty |= CROCUS_DIRTY_GEN6_URB;
      ice->shaders.ff_gs_prog = shader;
      if (shader) {
         const struct elk_ff_gs_prog_data *gs_prog_data =
            (struct elk_ff_gs_prog_data *)ice->shaders.ff_gs_prog->prog_data;
         ice->state.last_xfb_verts_per_prim = gs_prog_data->svbi_postincrement_value;
      }
   }
}

// XXX: crocus_compiled_shaders are space-leaking :(
// XXX: do remember to unbind them if deleting them.

/**
 * Update the current shader variants for the given state.
 *
 * This should be called on every draw call to ensure that the correct
 * shaders are bound.  It will also flag any dirty state triggered by
 * swapping out those shaders.
 */
bool
crocus_update_compiled_shaders(struct crocus_context *ice)
{
   struct crocus_screen *screen = (void *) ice->ctx.screen;
   const uint64_t stage_dirty = ice->state.stage_dirty;

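   /* Snapshot the VUE prog_datas up front so we can tell afterwards whether
    * any URB entry sizes changed, which requires reconfiguring the URB. */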
   struct elk_vue_prog_data *old_prog_datas[4];
   if (!(ice->state.dirty & CROCUS_DIRTY_GEN6_URB)) {
      for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++)
         old_prog_datas[i] = get_vue_prog_data(ice, i);
   }

   if (stage_dirty & (CROCUS_STAGE_DIRTY_UNCOMPILED_TCS |
                      CROCUS_STAGE_DIRTY_UNCOMPILED_TES)) {
      struct crocus_uncompiled_shader *tes =
         ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
      if (tes) {
         crocus_update_compiled_tcs(ice);
         crocus_update_compiled_tes(ice);
      } else {
         ice->shaders.prog[CROCUS_CACHE_TCS] = NULL;
         ice->shaders.prog[CROCUS_CACHE_TES] = NULL;
         ice->state.stage_dirty |=
            CROCUS_STAGE_DIRTY_TCS | CROCUS_STAGE_DIRTY_TES |
            CROCUS_STAGE_DIRTY_BINDINGS_TCS | CROCUS_STAGE_DIRTY_BINDINGS_TES |
            CROCUS_STAGE_DIRTY_CONSTANTS_TCS | CROCUS_STAGE_DIRTY_CONSTANTS_TES;
      }
   }

   if (stage_dirty & CROCUS_STAGE_DIRTY_UNCOMPILED_VS)
      crocus_update_compiled_vs(ice);
   if (stage_dirty & CROCUS_STAGE_DIRTY_UNCOMPILED_GS)
      crocus_update_compiled_gs(ice);

   if (stage_dirty & (CROCUS_STAGE_DIRTY_UNCOMPILED_GS |
                      CROCUS_STAGE_DIRTY_UNCOMPILED_TES)) {
      const struct crocus_compiled_shader *gs =
         ice->shaders.prog[MESA_SHADER_GEOMETRY];
      const struct crocus_compiled_shader *tes =
         ice->shaders.prog[MESA_SHADER_TESS_EVAL];

      bool points_or_lines = false;

      if (gs) {
         const struct elk_gs_prog_data *gs_prog_data = (void *) gs->prog_data;
         points_or_lines =
            gs_prog_data->output_topology == _3DPRIM_POINTLIST ||
            gs_prog_data->output_topology == _3DPRIM_LINESTRIP;
      } else if (tes) {
         const struct elk_tes_prog_data *tes_data = (void *) tes->prog_data;
         points_or_lines =
            tes_data->output_topology == INTEL_TESS_OUTPUT_TOPOLOGY_LINE ||
            tes_data->output_topology == INTEL_TESS_OUTPUT_TOPOLOGY_POINT;
      }

      if (ice->shaders.output_topology_is_points_or_lines != points_or_lines) {
         /* Outbound to XY Clip enables */
         ice->shaders.output_topology_is_points_or_lines = points_or_lines;
         ice->state.dirty |= CROCUS_DIRTY_CLIP;
      }
   }

   if (!ice->shaders.prog[MESA_SHADER_VERTEX])
      return false;

   gl_shader_stage last_stage = last_vue_stage(ice);
   struct crocus_compiled_shader *shader = ice->shaders.prog[last_stage];
   struct crocus_uncompiled_shader *ish = ice->shaders.uncompiled[last_stage];
   update_last_vue_map(ice, shader->prog_data);
   if (ice->state.streamout != shader->streamout) {
      ice->state.streamout = shader->streamout;
      ice->state.dirty |= CROCUS_DIRTY_SO_DECL_LIST | CROCUS_DIRTY_STREAMOUT;
   }

   if (ice->state.streamout_active) {
      screen->vtbl.update_so_strides(ice, ish->stream_output.stride);
   }

   /* use ice->state version as last_vue_map can dirty this bit */
   if (ice->state.stage_dirty & CROCUS_STAGE_DIRTY_UNCOMPILED_FS)
      crocus_update_compiled_fs(ice);

   if (screen->devinfo.ver <= 6) {
      if (ice->state.dirty & CROCUS_DIRTY_GEN4_FF_GS_PROG &&
          !ice->shaders.prog[MESA_SHADER_GEOMETRY])
         crocus_update_compiled_ff_gs(ice);
   }

   if (screen->devinfo.ver < 6) {
      if (ice->state.dirty & CROCUS_DIRTY_GEN4_CLIP_PROG)
         crocus_update_compiled_clip(ice);
      if (ice->state.dirty & CROCUS_DIRTY_GEN4_SF_PROG)
         crocus_update_compiled_sf(ice);
   }

   /* Changing shader interfaces may require a URB reconfiguration. */
   if (!(ice->state.dirty & CROCUS_DIRTY_GEN6_URB)) {
      for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
         struct elk_vue_prog_data *old = old_prog_datas[i];
         struct elk_vue_prog_data *new = get_vue_prog_data(ice, i);
         if (!!old != !!new ||
             (new && new->urb_entry_size != old->urb_entry_size)) {
            ice->state.dirty |= CROCUS_DIRTY_GEN6_URB;
            break;
         }
      }
   }

   if (ice->state.stage_dirty & CROCUS_RENDER_STAGE_DIRTY_CONSTANTS) {
      for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
         if (ice->state.stage_dirty & (CROCUS_STAGE_DIRTY_CONSTANTS_VS << i))
            crocus_update_pull_constant_descriptors(ice, i);
      }
   }

   return true;
}

static struct crocus_compiled_shader *
crocus_compile_cs(struct crocus_context *ice,
                  struct crocus_uncompiled_shader *ish,
                  const struct elk_cs_prog_key *key)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   const struct elk_compiler *compiler = screen->compiler;
   void *mem_ctx = ralloc_context(NULL);
   struct elk_cs_prog_data *cs_prog_data =
      rzalloc(mem_ctx, struct elk_cs_prog_data);
   struct elk_stage_prog_data *prog_data = &cs_prog_data->base;
   enum elk_param_builtin *system_values;
   const struct intel_device_info *devinfo = &screen->devinfo;
   unsigned num_system_values;
   unsigned num_cbufs;

   nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);

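   /* Lower compute built-ins (workgroup/invocation IDs and friends) into
    * the forms the backend compiler expects, filling in cs_prog_data. */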
   NIR_PASS_V(nir, elk_nir_lower_cs_intrinsics, devinfo, cs_prog_data);

   crocus_setup_uniforms(devinfo, mem_ctx, nir, prog_data, &system_values,
                         &num_system_values, &num_cbufs);
   crocus_lower_swizzles(nir, &key->base.tex);
   struct crocus_binding_table bt;
   crocus_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
                              num_system_values, num_cbufs, &key->base.tex);

   struct elk_compile_cs_params params = {
      .base = {
         .mem_ctx = mem_ctx,
         .nir = nir,
         .log_data = &ice->dbg,
      },
      .key = key,
      .prog_data = cs_prog_data,
   };

   const unsigned *program = elk_compile_cs(compiler, &params);
   if (program == NULL) {
      dbg_printf("Failed to compile compute shader: %s\n", params.base.error_str);
      ralloc_free(mem_ctx);
      return NULL;
   }

   if (ish->compiled_once) {
      crocus_debug_recompile(ice, &nir->info, &key->base);
   } else {
      ish->compiled_once = true;
   }

   struct crocus_compiled_shader *shader =
      crocus_upload_shader(ice, CROCUS_CACHE_CS, sizeof(*key), key, program,
                           prog_data->program_size,
                           prog_data, sizeof(*cs_prog_data), NULL,
                           system_values, num_system_values,
                           num_cbufs, &bt);

   crocus_disk_cache_store(screen->disk_cache, ish, shader,
                           ice->shaders.cache_bo_map,
                           key, sizeof(*key));

   ralloc_free(mem_ctx);
   return shader;
}

static void
crocus_update_compiled_cs(struct crocus_context *ice)
{
   struct crocus_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
   struct crocus_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   const struct intel_device_info *devinfo = &screen->devinfo;
   struct elk_cs_prog_key key = { KEY_INIT() };

   if (ish->nos & (1ull << CROCUS_NOS_TEXTURES))
      crocus_populate_sampler_prog_key_data(ice, devinfo, MESA_SHADER_COMPUTE, ish,
                                            ish->nir->info.uses_texture_gather, &key.base.tex);
   screen->vtbl.populate_cs_key(ice, &key);

   struct crocus_compiled_shader *old = ice->shaders.prog[CROCUS_CACHE_CS];
   struct crocus_compiled_shader *shader =
      crocus_find_cached_shader(ice, CROCUS_CACHE_CS, sizeof(key), &key);

   if (!shader)
      shader = crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key));

   if (!shader)
      shader = crocus_compile_cs(ice, ish, &key);

   if (old != shader) {
      ice->shaders.prog[CROCUS_CACHE_CS] = shader;
      ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_CS |
                                CROCUS_STAGE_DIRTY_BINDINGS_CS |
                                CROCUS_STAGE_DIRTY_CONSTANTS_CS;
      shs->sysvals_need_upload = true;
   }
}

void
crocus_update_compiled_compute_shader(struct crocus_context *ice)
{
   if (ice->state.stage_dirty & CROCUS_STAGE_DIRTY_UNCOMPILED_CS)
      crocus_update_compiled_cs(ice);

   if (ice->state.stage_dirty & CROCUS_STAGE_DIRTY_CONSTANTS_CS)
      crocus_update_pull_constant_descriptors(ice, MESA_SHADER_COMPUTE);
}

void
crocus_fill_cs_push_const_buffer(struct elk_cs_prog_data *cs_prog_data,
                                 unsigned threads,
                                 uint32_t *dst)
{
   assert(elk_cs_push_const_total_size(cs_prog_data, threads) > 0);
   assert(cs_prog_data->push.cross_thread.size == 0);
   assert(cs_prog_data->push.per_thread.dwords == 1);
   assert(cs_prog_data->base.param[0] == ELK_PARAM_BUILTIN_SUBGROUP_ID);
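
   /* Each thread gets one 8-dword (32-byte) push constant register whose
    * only payload is that thread's subgroup ID, per the asserts above. */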
   for (unsigned t = 0; t < threads; t++)
      dst[8 * t] = t;
}

/**
 * Allocate scratch BOs as needed for the given per-thread size and stage.
 */
struct crocus_bo *
crocus_get_scratch_space(struct crocus_context *ice,
                         unsigned per_thread_scratch,
                         gl_shader_stage stage)
{
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   struct crocus_bufmgr *bufmgr = screen->bufmgr;
   const struct intel_device_info *devinfo = &screen->devinfo;

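   /* Per-thread scratch sizes are powers of two starting at 1KB, so
    * ffs(size) - 11 maps 1KB -> 0, 2KB -> 1, and so on, indexing the
    * cache of scratch BOs below. */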
   unsigned encoded_size = ffs(per_thread_scratch) - 11;
   assert(encoded_size < (1 << 16));

   struct crocus_bo **bop = &ice->shaders.scratch_bos[encoded_size][stage];

   if (!*bop) {
      assert(stage < ARRAY_SIZE(devinfo->max_scratch_ids));
      uint32_t size = per_thread_scratch * devinfo->max_scratch_ids[stage];
      *bop = crocus_bo_alloc(bufmgr, "scratch", size);
   }

   return *bop;
}

/* ------------------------------------------------------------------- */

/**
 * The pipe->create_[stage]_state() driver hooks.
 *
 * Performs basic NIR preprocessing, records any state dependencies, and
 * returns a crocus_uncompiled_shader as the Gallium CSO.
 *
 * Actual shader compilation to assembly happens later, at first use.
 */
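/* As a rough sketch (frontend-side pseudocode, not part of this driver),
 * a Gallium state tracker drives these hooks roughly like so:
 *
 *    void *cso = ctx->create_fs_state(ctx, &shader_state);
 *    ctx->bind_fs_state(ctx, cso);
 *    ... draw calls trigger compilation of variants at first use ...
 *    ctx->bind_fs_state(ctx, NULL);
 *    ctx->delete_fs_state(ctx, cso);
 */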
static void *
crocus_create_uncompiled_shader(struct pipe_context *ctx,
                                nir_shader *nir,
                                const struct pipe_stream_output_info *so_info)
{
   struct crocus_screen *screen = (struct crocus_screen *)ctx->screen;
   const struct intel_device_info *devinfo = &screen->devinfo;
   struct crocus_uncompiled_shader *ish =
      calloc(1, sizeof(struct crocus_uncompiled_shader));
   if (!ish)
      return NULL;

   if (devinfo->ver >= 6)
      NIR_PASS(ish->needs_edge_flag, nir, crocus_fix_edge_flags);
   else
      ish->needs_edge_flag = false;

   struct elk_nir_compiler_opts opts = {};
   elk_preprocess_nir(screen->compiler, nir, &opts);

   NIR_PASS_V(nir, elk_nir_lower_storage_image,
              &(struct elk_nir_lower_storage_image_opts) {
                 .devinfo = devinfo,
                 .lower_loads = true,
                 .lower_stores = true,
                 .lower_atomics = true,
                 .lower_get_size = true,
              });
   NIR_PASS_V(nir, crocus_lower_storage_image_derefs);

   nir_sweep(nir);

   ish->program_id = get_new_program_id(screen);
   ish->nir = nir;
   if (so_info) {
      memcpy(&ish->stream_output, so_info, sizeof(*so_info));
      update_so_info(&ish->stream_output, nir->info.outputs_written);
   }

   if (screen->disk_cache) {
      /* Serialize the NIR to a binary blob that we can hash for the disk
       * cache.  Drop unnecessary information (like variable names)
       * so the serialized NIR is smaller, and also to let us detect more
       * isomorphic shaders when hashing, increasing cache hits.
       */
      struct blob blob;
      blob_init(&blob);
      nir_serialize(&blob, nir, true);
      _mesa_sha1_compute(blob.data, blob.size, ish->nir_sha1);
      blob_finish(&blob);
   }

   return ish;
}

static struct crocus_uncompiled_shader *
crocus_create_shader_state(struct pipe_context *ctx,
                           const struct pipe_shader_state *state)
{
   struct nir_shader *nir;

   if (state->type == PIPE_SHADER_IR_TGSI)
      nir = tgsi_to_nir(state->tokens, ctx->screen, false);
   else
      nir = state->ir.nir;

   return crocus_create_uncompiled_shader(ctx, nir, &state->stream_output);
}

static void *
crocus_create_vs_state(struct pipe_context *ctx,
                       const struct pipe_shader_state *state)
{
   struct crocus_context *ice = (void *) ctx;
   struct crocus_screen *screen = (void *) ctx->screen;
   struct crocus_uncompiled_shader *ish = crocus_create_shader_state(ctx, state);

   ish->nos |= (1ull << CROCUS_NOS_TEXTURES);
   /* User clip planes or gen5 sprite coord enable */
   if (ish->nir->info.clip_distance_array_size == 0 ||
       screen->devinfo.ver <= 5)
      ish->nos |= (1ull << CROCUS_NOS_RASTERIZER);

   if (screen->devinfo.verx10 < 75)
      ish->nos |= (1ull << CROCUS_NOS_VERTEX_ELEMENTS);

   if (screen->precompile) {
      struct elk_vs_prog_key key = { KEY_INIT() };

      if (!crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
         crocus_compile_vs(ice, ish, &key);
   }

   return ish;
}

static void *
crocus_create_tcs_state(struct pipe_context *ctx,
                        const struct pipe_shader_state *state)
{
   struct crocus_context *ice = (void *) ctx;
   struct crocus_screen *screen = (void *) ctx->screen;
   struct crocus_uncompiled_shader *ish = crocus_create_shader_state(ctx, state);
   struct shader_info *info = &ish->nir->info;

   ish->nos |= (1ull << CROCUS_NOS_TEXTURES);
   if (screen->precompile) {
      struct elk_tcs_prog_key key = {
         KEY_INIT(),
         // XXX: make sure the linker fills this out from the TES...
         ._tes_primitive_mode =
            info->tess._primitive_mode ? info->tess._primitive_mode
                                       : TESS_PRIMITIVE_TRIANGLES,
         .outputs_written = info->outputs_written,
         .patch_outputs_written = info->patch_outputs_written,
      };

      key.input_vertices = info->tess.tcs_vertices_out;

      if (!crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
         crocus_compile_tcs(ice, ish, &key);
   }

   return ish;
}

static void *
crocus_create_tes_state(struct pipe_context *ctx,
                        const struct pipe_shader_state *state)
{
   struct crocus_context *ice = (void *) ctx;
   struct crocus_screen *screen = (void *) ctx->screen;
   struct crocus_uncompiled_shader *ish = crocus_create_shader_state(ctx, state);
   struct shader_info *info = &ish->nir->info;

   ish->nos |= (1ull << CROCUS_NOS_TEXTURES);
   /* User clip planes */
   if (ish->nir->info.clip_distance_array_size == 0)
      ish->nos |= (1ull << CROCUS_NOS_RASTERIZER);

   if (screen->precompile) {
      struct elk_tes_prog_key key = {
         KEY_INIT(),
         // XXX: not ideal, need TCS output/TES input unification
         .inputs_read = info->inputs_read,
         .patch_inputs_read = info->patch_inputs_read,
      };

      if (!crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
         crocus_compile_tes(ice, ish, &key);
   }

   return ish;
}

static void *
crocus_create_gs_state(struct pipe_context *ctx,
                       const struct pipe_shader_state *state)
{
   struct crocus_context *ice = (void *) ctx;
   struct crocus_screen *screen = (void *) ctx->screen;
   struct crocus_uncompiled_shader *ish = crocus_create_shader_state(ctx, state);

   ish->nos |= (1ull << CROCUS_NOS_TEXTURES);
   /* User clip planes */
   if (ish->nir->info.clip_distance_array_size == 0)
      ish->nos |= (1ull << CROCUS_NOS_RASTERIZER);

   if (screen->precompile) {
      struct elk_gs_prog_key key = { KEY_INIT() };

      if (!crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
         crocus_compile_gs(ice, ish, &key);
   }

   return ish;
}

static void *
crocus_create_fs_state(struct pipe_context *ctx,
                       const struct pipe_shader_state *state)
{
   struct crocus_context *ice = (void *) ctx;
   struct crocus_screen *screen = (void *) ctx->screen;
   struct crocus_uncompiled_shader *ish = crocus_create_shader_state(ctx, state);
   struct shader_info *info = &ish->nir->info;

   ish->nos |= (1ull << CROCUS_NOS_FRAMEBUFFER) |
               (1ull << CROCUS_NOS_DEPTH_STENCIL_ALPHA) |
               (1ull << CROCUS_NOS_RASTERIZER) |
               (1ull << CROCUS_NOS_TEXTURES) |
               (1ull << CROCUS_NOS_BLEND);

   /* The program key needs the VUE map if there are > 16 inputs or gen4/5 */
   if (screen->devinfo.ver < 6 ||
       util_bitcount64(ish->nir->info.inputs_read &
                       ELK_FS_VARYING_INPUT_MASK) > 16) {
      ish->nos |= (1ull << CROCUS_NOS_LAST_VUE_MAP);
   }

   if (screen->precompile) {
      const uint64_t color_outputs = info->outputs_written &
         ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
           BITFIELD64_BIT(FRAG_RESULT_STENCIL) |
           BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK));

      bool can_rearrange_varyings =
         screen->devinfo.ver > 6 &&
         util_bitcount64(info->inputs_read & ELK_FS_VARYING_INPUT_MASK) <= 16;

      const struct intel_device_info *devinfo = &screen->devinfo;
      struct elk_wm_prog_key key = {
         KEY_INIT(),
         .nr_color_regions = util_bitcount(color_outputs),
         .coherent_fb_fetch = false,
         .ignore_sample_mask_out = screen->devinfo.ver < 6 ? 1 : 0,
         .input_slots_valid =
            can_rearrange_varyings ? 0 : info->inputs_read | VARYING_BIT_POS,
      };

      struct intel_vue_map vue_map;
      if (devinfo->ver < 6) {
         elk_compute_vue_map(devinfo, &vue_map,
                             info->inputs_read | VARYING_BIT_POS,
                             false, /* pos slots */ 1);
      }
      if (!crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
         crocus_compile_fs(ice, ish, &key, &vue_map);
   }

   return ish;
}

static void *
crocus_create_compute_state(struct pipe_context *ctx,
                            const struct pipe_compute_state *state)
{
   assert(state->ir_type == PIPE_SHADER_IR_NIR);

   struct crocus_context *ice = (void *) ctx;
   struct crocus_screen *screen = (void *) ctx->screen;
   struct crocus_uncompiled_shader *ish =
      crocus_create_uncompiled_shader(ctx, (void *) state->prog, NULL);

   ish->nos |= (1ull << CROCUS_NOS_TEXTURES);
   // XXX: disallow more than 64KB of shared variables

   if (screen->precompile) {
      struct elk_cs_prog_key key = { KEY_INIT() };

      if (!crocus_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
         crocus_compile_cs(ice, ish, &key);
   }

   return ish;
}

/**
 * The pipe->delete_[stage]_state() driver hooks.
 *
 * Frees the crocus_uncompiled_shader.
 */
static void
crocus_delete_shader_state(struct pipe_context *ctx, void *state, gl_shader_stage stage)
{
   struct crocus_uncompiled_shader *ish = state;
   struct crocus_context *ice = (void *) ctx;

   if (ice->shaders.uncompiled[stage] == ish) {
      ice->shaders.uncompiled[stage] = NULL;
      ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_UNCOMPILED_VS << stage;
   }

   if (ish->const_data) {
      pipe_resource_reference(&ish->const_data, NULL);
      pipe_resource_reference(&ish->const_data_state.res, NULL);
   }

   ralloc_free(ish->nir);
   free(ish);
}

static void
crocus_delete_vs_state(struct pipe_context *ctx, void *state)
{
   crocus_delete_shader_state(ctx, state, MESA_SHADER_VERTEX);
}

static void
crocus_delete_tcs_state(struct pipe_context *ctx, void *state)
{
   crocus_delete_shader_state(ctx, state, MESA_SHADER_TESS_CTRL);
}

static void
crocus_delete_tes_state(struct pipe_context *ctx, void *state)
{
   crocus_delete_shader_state(ctx, state, MESA_SHADER_TESS_EVAL);
}

static void
crocus_delete_gs_state(struct pipe_context *ctx, void *state)
{
   crocus_delete_shader_state(ctx, state, MESA_SHADER_GEOMETRY);
}

static void
crocus_delete_fs_state(struct pipe_context *ctx, void *state)
{
   crocus_delete_shader_state(ctx, state, MESA_SHADER_FRAGMENT);
}

static void
crocus_delete_cs_state(struct pipe_context *ctx, void *state)
{
   crocus_delete_shader_state(ctx, state, MESA_SHADER_COMPUTE);
}

/**
 * The pipe->bind_[stage]_state() driver hook.
 *
 * Binds an uncompiled shader as the current one for a particular stage.
 * Updates dirty tracking to account for the shader's NOS.
 */
static void
bind_shader_state(struct crocus_context *ice,
                  struct crocus_uncompiled_shader *ish,
                  gl_shader_stage stage)
{
   uint64_t dirty_bit = CROCUS_STAGE_DIRTY_UNCOMPILED_VS << stage;
   const uint64_t nos = ish ? ish->nos : 0;

   const struct shader_info *old_info = crocus_get_shader_info(ice, stage);
   const struct shader_info *new_info = ish ? &ish->nir->info : NULL;

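   /* If the highest used texture index changes, the number of sampler
    * states we need to upload changes too. */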
   if ((old_info ? BITSET_LAST_BIT(old_info->textures_used) : 0) !=
       (new_info ? BITSET_LAST_BIT(new_info->textures_used) : 0)) {
      ice->state.stage_dirty |= CROCUS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
   }

   ice->shaders.uncompiled[stage] = ish;
   ice->state.stage_dirty |= dirty_bit;

   /* Record that CSOs need to mark CROCUS_DIRTY_UNCOMPILED_XS when they change
    * (or that they no longer need to do so).
    */
   for (int i = 0; i < CROCUS_NOS_COUNT; i++) {
      if (nos & (1 << i))
         ice->state.stage_dirty_for_nos[i] |= dirty_bit;
      else
         ice->state.stage_dirty_for_nos[i] &= ~dirty_bit;
   }
}

static void
crocus_bind_vs_state(struct pipe_context *ctx, void *state)
{
   struct crocus_context *ice = (struct crocus_context *)ctx;
   struct crocus_uncompiled_shader *new_ish = state;
   struct crocus_screen *screen = (struct crocus_screen *)ice->ctx.screen;
   const struct intel_device_info *devinfo = &screen->devinfo;

   if (new_ish &&
       ice->state.window_space_position !=
       new_ish->nir->info.vs.window_space_position) {
      ice->state.window_space_position =
         new_ish->nir->info.vs.window_space_position;

      ice->state.dirty |= CROCUS_DIRTY_CLIP |
                          CROCUS_DIRTY_RASTER |
                          CROCUS_DIRTY_CC_VIEWPORT;
   }

   if (devinfo->ver == 6) {
      ice->state.dirty |= CROCUS_DIRTY_GEN4_FF_GS_PROG;
   }

   bind_shader_state((void *) ctx, state, MESA_SHADER_VERTEX);
}

static void
crocus_bind_tcs_state(struct pipe_context *ctx, void *state)
{
   bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_CTRL);
}

static void
crocus_bind_tes_state(struct pipe_context *ctx, void *state)
{
   struct crocus_context *ice = (struct crocus_context *)ctx;

   /* Enabling/disabling optional stages requires a URB reconfiguration. */
   if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
      ice->state.dirty |= CROCUS_DIRTY_GEN6_URB;

   bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_EVAL);
}

static void
crocus_bind_gs_state(struct pipe_context *ctx, void *state)
{
   struct crocus_context *ice = (struct crocus_context *)ctx;

   /* Enabling/disabling optional stages requires a URB reconfiguration. */
   if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
      ice->state.dirty |= CROCUS_DIRTY_GEN6_URB;

   bind_shader_state((void *) ctx, state, MESA_SHADER_GEOMETRY);
}

static void
crocus_bind_fs_state(struct pipe_context *ctx, void *state)
{
   struct crocus_context *ice = (struct crocus_context *) ctx;
   struct crocus_screen *screen = (struct crocus_screen *) ctx->screen;
   const struct intel_device_info *devinfo = &screen->devinfo;
   struct crocus_uncompiled_shader *old_ish =
      ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
   struct crocus_uncompiled_shader *new_ish = state;

   const unsigned color_bits =
      BITFIELD64_BIT(FRAG_RESULT_COLOR) |
      BITFIELD64_RANGE(FRAG_RESULT_DATA0, ELK_MAX_DRAW_BUFFERS);

   /* Fragment shader outputs influence HasWriteableRT */
   if (!old_ish || !new_ish ||
       (old_ish->nir->info.outputs_written & color_bits) !=
       (new_ish->nir->info.outputs_written & color_bits)) {
      if (devinfo->ver == 8)
         ice->state.dirty |= CROCUS_DIRTY_GEN8_PS_BLEND;
      else
         ice->state.dirty |= CROCUS_DIRTY_WM;
   }

   if (devinfo->ver == 8)
      ice->state.dirty |= CROCUS_DIRTY_GEN8_PMA_FIX;

   bind_shader_state((void *) ctx, state, MESA_SHADER_FRAGMENT);
}

static void
crocus_bind_cs_state(struct pipe_context *ctx, void *state)
{
   bind_shader_state((void *) ctx, state, MESA_SHADER_COMPUTE);
}

void
crocus_init_program_functions(struct pipe_context *ctx)
{
   ctx->create_vs_state = crocus_create_vs_state;
   ctx->create_tcs_state = crocus_create_tcs_state;
   ctx->create_tes_state = crocus_create_tes_state;
   ctx->create_gs_state = crocus_create_gs_state;
   ctx->create_fs_state = crocus_create_fs_state;
   ctx->create_compute_state = crocus_create_compute_state;

   ctx->delete_vs_state = crocus_delete_vs_state;
   ctx->delete_tcs_state = crocus_delete_tcs_state;
   ctx->delete_tes_state = crocus_delete_tes_state;
   ctx->delete_gs_state = crocus_delete_gs_state;
   ctx->delete_fs_state = crocus_delete_fs_state;
   ctx->delete_compute_state = crocus_delete_cs_state;

   ctx->bind_vs_state = crocus_bind_vs_state;
   ctx->bind_tcs_state = crocus_bind_tcs_state;
   ctx->bind_tes_state = crocus_bind_tes_state;
   ctx->bind_gs_state = crocus_bind_gs_state;
   ctx->bind_fs_state = crocus_bind_fs_state;
   ctx->bind_compute_state = crocus_bind_cs_state;
}