/*
 * Copyright © 2020 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "intel_nir.h"
#include "brw_nir_rt.h"
#include "brw_nir_rt_builder.h"
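
/* Resize a deref's destination to the given number of components and bit
 * size, rewriting its array index source (if any) to match the new bit size.
 * Returns true if the deref was changed.
 */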
static bool
resize_deref(nir_builder *b, nir_deref_instr *deref,
             unsigned num_components, unsigned bit_size)
{
   if (deref->def.num_components == num_components &&
       deref->def.bit_size == bit_size)
      return false;

   /* NIR requires array indices to match the deref bit size */
   if (deref->def.bit_size != bit_size &&
       (deref->deref_type == nir_deref_type_array ||
        deref->deref_type == nir_deref_type_ptr_as_array)) {
      b->cursor = nir_before_instr(&deref->instr);
      nir_def *idx;
      if (nir_src_is_const(deref->arr.index)) {
         idx = nir_imm_intN_t(b, nir_src_as_int(deref->arr.index), bit_size);
      } else {
         idx = nir_i2iN(b, deref->arr.index.ssa, bit_size);
      }
      nir_src_rewrite(&deref->arr.index, idx);
   }

   deref->def.num_components = num_components;
   deref->def.bit_size = bit_size;

   return true;
}

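/* Rewrite shader_call_data and ray_hit_attrib derefs as function_temp derefs
 * cast from 64-bit addresses computed at the top of the shader, then resize
 * every function_temp deref to a 1-component, 64-bit pointer so that
 * nir_lower_explicit_io can later lower it to global memory access.
 */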
static bool
lower_rt_io_derefs(nir_shader *shader)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   bool progress = false;

   unsigned num_shader_call_vars = 0;
   nir_foreach_variable_with_modes(var, shader, nir_var_shader_call_data)
      num_shader_call_vars++;

   unsigned num_ray_hit_attrib_vars = 0;
   nir_foreach_variable_with_modes(var, shader, nir_var_ray_hit_attrib)
      num_ray_hit_attrib_vars++;

   /* At most one payload is allowed because it's an input. Technically, this
    * is also true for hit attribute variables. However, after we inline an
    * any-hit shader into an intersection shader, we can end up with multiple
    * hit attribute variables. They'll end up mapping to a cast from the same
    * base pointer so this is fine.
    */
   assert(num_shader_call_vars <= 1);

   nir_builder b = nir_builder_at(nir_before_impl(impl));

   nir_def *call_data_addr = NULL;
   if (num_shader_call_vars > 0) {
      assert(shader->scratch_size >= BRW_BTD_STACK_CALLEE_DATA_SIZE);
      call_data_addr =
         brw_nir_rt_load_scratch(&b, BRW_BTD_STACK_CALL_DATA_PTR_OFFSET, 8,
                                 1, 64);
      progress = true;
   }

   gl_shader_stage stage = shader->info.stage;
   nir_def *hit_attrib_addr = NULL;
   if (num_ray_hit_attrib_vars > 0) {
      assert(stage == MESA_SHADER_ANY_HIT ||
             stage == MESA_SHADER_CLOSEST_HIT ||
             stage == MESA_SHADER_INTERSECTION);
      nir_def *hit_addr =
         brw_nir_rt_mem_hit_addr(&b, stage == MESA_SHADER_CLOSEST_HIT);
      /* The vec2 barycentrics are in the 2nd and 3rd dwords of MemHit */
      nir_def *bary_addr = nir_iadd_imm(&b, hit_addr, 4);
      hit_attrib_addr = nir_bcsel(&b, nir_load_leaf_procedural_intel(&b),
                                  brw_nir_rt_hit_attrib_data_addr(&b),
                                  bary_addr);
      progress = true;
   }

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_deref)
            continue;

         nir_deref_instr *deref = nir_instr_as_deref(instr);
         if (nir_deref_mode_is(deref, nir_var_shader_call_data)) {
            deref->modes = nir_var_function_temp;
            if (deref->deref_type == nir_deref_type_var) {
               b.cursor = nir_before_instr(&deref->instr);
               nir_deref_instr *cast =
                  nir_build_deref_cast(&b, call_data_addr,
                                       nir_var_function_temp,
                                       deref->var->type, 0);
               nir_def_replace(&deref->def, &cast->def);
               progress = true;
            }
         } else if (nir_deref_mode_is(deref, nir_var_ray_hit_attrib)) {
            deref->modes = nir_var_function_temp;
            if (deref->deref_type == nir_deref_type_var) {
               b.cursor = nir_before_instr(&deref->instr);
               nir_deref_instr *cast =
                  nir_build_deref_cast(&b, hit_attrib_addr,
                                       nir_var_function_temp,
                                       deref->type, 0);
               nir_def_replace(&deref->def, &cast->def);
               progress = true;
            }
         }

         /* We're going to lower all function_temp memory to scratch using
          * 64-bit addresses. We need to resize all our derefs first or else
          * nir_lower_explicit_io will have a fit.
          */
         if (nir_deref_mode_is(deref, nir_var_function_temp) &&
             resize_deref(&b, deref, 1, 64))
            progress = true;
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_control_flow);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

/** Lowers ray-tracing shader I/O and scratch access
 *
 * SPV_KHR_ray_tracing adds three new types of I/O, each of which needs its
 * own bit of special care:
 *
 *  - Shader payload data: This is represented by the IncomingCallableData
 *    and IncomingRayPayload storage classes which are both represented by
 *    nir_var_shader_call_data in NIR. There is at most one of these per
 *    shader and it contains payload data passed down the stack from the
 *    parent shader when it calls executeCallable() or traceRay(). In our
 *    implementation, the actual storage lives in the calling shader's
 *    scratch space and we're passed a pointer to it.
 *
 *  - Hit attribute data: This is represented by the HitAttribute storage
 *    class in SPIR-V and nir_var_ray_hit_attrib in NIR. For triangle
 *    geometry, it's supposed to contain two floats which are the barycentric
 *    coordinates. For AABB/procedural geometry, it contains the hit data
 *    written out by the intersection shader. In our implementation, it's a
 *    64-bit pointer which points either to the u/v area of the relevant
 *    MemHit data structure or the space right after the HW ray stack entry.
 *
 *  - Shader record buffer data: This allows read-only access to the data
 *    stored in the SBT right after the bindless shader handles. It's
 *    effectively a UBO with a magic address. Coming out of spirv_to_nir,
 *    we get a nir_intrinsic_load_shader_record_ptr which is cast to a
 *    nir_var_mem_global deref and all access happens through that. The
 *    shader_record_ptr system value is handled in brw_nir_lower_rt_intrinsics
 *    and we assume nir_lower_explicit_io is called elsewhere thanks to
 *    VK_KHR_buffer_device_address, so there's really nothing to do here.
 *
 * We also handle lowering any remaining function_temp variables to scratch at
 * this point. This gets rid of any remaining arrays and also takes care of
 * the sending side of ray payloads where we pass pointers to a function_temp
 * variable down the call stack.
 */
static void
lower_rt_io_and_scratch(nir_shader *nir)
{
   /* First, we need to ensure all the I/O variables have explicit types.
    * Because these are shader-internal and don't come in from outside, they
    * don't have an explicit memory layout and we have to assign them one.
    */
   NIR_PASS_V(nir, nir_lower_vars_to_explicit_types,
              nir_var_function_temp |
              nir_var_shader_call_data |
              nir_var_ray_hit_attrib,
              glsl_get_natural_size_align_bytes);

   /* Now patch any derefs to I/O vars */
   NIR_PASS_V(nir, lower_rt_io_derefs);

   /* Finally, lower any remaining function_temp, mem_constant, or
    * ray_hit_attrib access to 64-bit global memory access.
    */
   NIR_PASS_V(nir, nir_lower_explicit_io,
              nir_var_function_temp |
              nir_var_mem_constant |
              nir_var_ray_hit_attrib,
              nir_address_format_64bit_global);
}

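/* Terminate the ray walk: if the ray flags say to skip the closest-hit
 * shader, return straight to the caller; otherwise commit the current hit
 * and spawn the closest-hit shader from the same SBT record.
 */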
static void
build_terminate_ray(nir_builder *b)
{
   nir_def *skip_closest_hit = nir_test_mask(b, nir_load_ray_flags(b),
      BRW_RT_RAY_FLAG_SKIP_CLOSEST_HIT_SHADER);
   nir_push_if(b, skip_closest_hit);
   {
      /* The shader that calls traceRay() is unable to access any ray hit
       * information except for that which is explicitly written into the ray
       * payload by shaders invoked during the trace. If there's no closest-
       * hit shader, then accepting the hit has no observable effect; it's
       * just extra memory traffic for no reason.
       */
      brw_nir_btd_return(b);
      nir_jump(b, nir_jump_halt);
   }
   nir_push_else(b, NULL);
   {
      /* The closest hit shader is in the same shader group as the any-hit
       * shader that we're currently in. We can get the address for its SBT
       * handle by looking at the shader record pointer and subtracting the
       * size of an SBT handle. The BINDLESS_SHADER_RECORD for a closest hit
       * shader is the first one in the SBT handle.
       */
      nir_def *closest_hit =
         nir_iadd_imm(b, nir_load_shader_record_ptr(b),
                      -BRW_RT_SBT_HANDLE_SIZE);

      brw_nir_rt_commit_hit(b);
      brw_nir_btd_spawn(b, closest_hit);
      nir_jump(b, nir_jump_halt);
   }
   nir_pop_if(b, NULL);
}

/** Lowers away ray walk intrinsics
 *
 * This lowers terminate_ray, ignore_ray_intersection, and the NIR-specific
 * accept_ray_intersection intrinsics to the appropriate Intel-specific
 * intrinsics.
 */
static bool
lower_ray_walk_intrinsics(nir_shader *shader,
                          const struct intel_device_info *devinfo)
{
   assert(shader->info.stage == MESA_SHADER_ANY_HIT ||
          shader->info.stage == MESA_SHADER_INTERSECTION);

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   nir_builder b = nir_builder_create(impl);

   bool progress = false;
   nir_foreach_block_safe(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

         switch (intrin->intrinsic) {
         case nir_intrinsic_ignore_ray_intersection: {
            b.cursor = nir_instr_remove(&intrin->instr);

            /* We put the newly emitted code inside a dummy if because it's
             * going to contain a jump instruction and we don't want to deal
             * with that mess here. It'll get dealt with by our control-flow
             * optimization passes.
             */
            nir_push_if(&b, nir_imm_true(&b));
            nir_trace_ray_intel(&b,
                                nir_load_btd_global_arg_addr_intel(&b),
                                nir_imm_int(&b, BRW_RT_BVH_LEVEL_OBJECT),
                                nir_imm_int(&b, GEN_RT_TRACE_RAY_CONTINUE),
                                .synchronous = false);
            nir_jump(&b, nir_jump_halt);
            nir_pop_if(&b, NULL);
            progress = true;
            break;
         }

         case nir_intrinsic_accept_ray_intersection: {
            b.cursor = nir_instr_remove(&intrin->instr);

            nir_def *terminate = nir_test_mask(&b, nir_load_ray_flags(&b),
               BRW_RT_RAY_FLAG_TERMINATE_ON_FIRST_HIT);
            nir_push_if(&b, terminate);
            {
               build_terminate_ray(&b);
            }
            nir_push_else(&b, NULL);
            {
               nir_trace_ray_intel(&b,
                                   nir_load_btd_global_arg_addr_intel(&b),
                                   nir_imm_int(&b, BRW_RT_BVH_LEVEL_OBJECT),
                                   nir_imm_int(&b, GEN_RT_TRACE_RAY_COMMIT),
                                   .synchronous = false);
               nir_jump(&b, nir_jump_halt);
            }
            nir_pop_if(&b, NULL);
            progress = true;
            break;
         }

         case nir_intrinsic_terminate_ray: {
            b.cursor = nir_instr_remove(&intrin->instr);
            build_terminate_ray(&b);
            progress = true;
            break;
         }

         default:
            break;
         }
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_none);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

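/* Per-stage lowering entry points. Each one lowers shader returns and, for
 * any-hit and intersection shaders, the ray walk intrinsics, before lowering
 * I/O and scratch access.
 */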
void
brw_nir_lower_raygen(nir_shader *nir)
{
   assert(nir->info.stage == MESA_SHADER_RAYGEN);
   NIR_PASS_V(nir, brw_nir_lower_shader_returns);
   lower_rt_io_and_scratch(nir);
}

void
brw_nir_lower_any_hit(nir_shader *nir, const struct intel_device_info *devinfo)
{
   assert(nir->info.stage == MESA_SHADER_ANY_HIT);
   NIR_PASS_V(nir, brw_nir_lower_shader_returns);
   NIR_PASS_V(nir, lower_ray_walk_intrinsics, devinfo);
   lower_rt_io_and_scratch(nir);
}

void
brw_nir_lower_closest_hit(nir_shader *nir)
{
   assert(nir->info.stage == MESA_SHADER_CLOSEST_HIT);
   NIR_PASS_V(nir, brw_nir_lower_shader_returns);
   lower_rt_io_and_scratch(nir);
}

void
brw_nir_lower_miss(nir_shader *nir)
{
   assert(nir->info.stage == MESA_SHADER_MISS);
   NIR_PASS_V(nir, brw_nir_lower_shader_returns);
   lower_rt_io_and_scratch(nir);
}

void
brw_nir_lower_callable(nir_shader *nir)
{
   assert(nir->info.stage == MESA_SHADER_CALLABLE);
   NIR_PASS_V(nir, brw_nir_lower_shader_returns);
   lower_rt_io_and_scratch(nir);
}

void
brw_nir_lower_combined_intersection_any_hit(nir_shader *intersection,
                                            const nir_shader *any_hit,
                                            const struct intel_device_info *devinfo)
{
   assert(intersection->info.stage == MESA_SHADER_INTERSECTION);
   assert(any_hit == NULL || any_hit->info.stage == MESA_SHADER_ANY_HIT);
   NIR_PASS_V(intersection, brw_nir_lower_shader_returns);
   NIR_PASS_V(intersection, brw_nir_lower_intersection_shader,
              any_hit, devinfo);
   NIR_PASS_V(intersection, lower_ray_walk_intrinsics, devinfo);
   lower_rt_io_and_scratch(intersection);
}

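/* Load a push constant from the trampoline's parameter block at the given
 * byte offset.
 */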
static nir_def *
build_load_uniform(nir_builder *b, unsigned offset,
                   unsigned num_components, unsigned bit_size)
{
   return nir_load_uniform(b, num_components, bit_size, nir_imm_int(b, 0),
                           .base = offset,
                           .range = num_components * bit_size / 8);
}

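/* Load a named member of struct brw_rt_raygen_trampoline_params from the
 * push constants.
 */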
#define load_trampoline_param(b, name, num_components, bit_size) \
   build_load_uniform((b), offsetof(struct brw_rt_raygen_trampoline_params, name), \
                      (num_components), (bit_size))

nir_shader *
brw_nir_create_raygen_trampoline(const struct brw_compiler *compiler,
                                 void *mem_ctx)
{
   const struct intel_device_info *devinfo = compiler->devinfo;
   const nir_shader_compiler_options *nir_options =
      compiler->nir_options[MESA_SHADER_COMPUTE];

   STATIC_ASSERT(sizeof(struct brw_rt_raygen_trampoline_params) == 32);

   nir_builder b = nir_builder_init_simple_shader(MESA_SHADER_COMPUTE,
                                                  nir_options,
                                                  "RT Ray-Gen Trampoline");
   ralloc_steal(mem_ctx, b.shader);

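   /* The trampoline's workgroup size isn't known at compile time; it comes in
    * through the local_group_size_log2 push constant, so mark it variable.
    */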
   b.shader->info.workgroup_size_variable = true;

   /* The RT global data and raygen BINDLESS_SHADER_RECORD addresses are
    * passed in as push constants in the first register. We deal with the
    * raygen BSR address here; the global data we'll deal with later.
    */
   b.shader->num_uniforms = 32;
   nir_def *raygen_param_bsr_addr =
      load_trampoline_param(&b, raygen_bsr_addr, 1, 64);
   nir_def *is_indirect =
      nir_i2b(&b, load_trampoline_param(&b, is_indirect, 1, 8));
   nir_def *local_shift =
      nir_u2u32(&b, load_trampoline_param(&b, local_group_size_log2, 3, 8));

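   /* For an indirect dispatch, the push constant holds a pointer to the
    * raygen BSR address rather than the address itself, so load through it.
    */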
   nir_def *raygen_indirect_bsr_addr;
   nir_push_if(&b, is_indirect);
   {
      raygen_indirect_bsr_addr =
         nir_load_global_constant(&b, raygen_param_bsr_addr,
                                  8 /* align */,
                                  1 /* components */,
                                  64 /* bit_size */);
   }
   nir_pop_if(&b, NULL);

   nir_def *raygen_bsr_addr =
      nir_if_phi(&b, raygen_indirect_bsr_addr, raygen_param_bsr_addr);

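   /* Compute the launch ID from the workgroup ID and the SIMD channel. The
    * flat channel index is unpacked into a local 3D invocation ID using the
    * log2 workgroup dimensions from the trampoline parameters.
    */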
   nir_def *global_id = nir_load_workgroup_id(&b);
   nir_def *simd_channel = nir_load_subgroup_invocation(&b);
   nir_def *local_x =
      nir_ubfe(&b, simd_channel, nir_imm_int(&b, 0),
               nir_channel(&b, local_shift, 0));
   nir_def *local_y =
      nir_ubfe(&b, simd_channel, nir_channel(&b, local_shift, 0),
               nir_channel(&b, local_shift, 1));
   nir_def *local_z =
      nir_ubfe(&b, simd_channel,
               nir_iadd(&b, nir_channel(&b, local_shift, 0),
                        nir_channel(&b, local_shift, 1)),
               nir_channel(&b, local_shift, 2));
   nir_def *launch_id =
      nir_iadd(&b, nir_ishl(&b, global_id, local_shift),
               nir_vec3(&b, local_x, local_y, local_z));

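   /* Only invocations that fall inside the launch grid spawn the raygen
    * shader; the rest just retire their hardware stack IDs.
    */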
   nir_def *launch_size = nir_load_ray_launch_size(&b);
   nir_push_if(&b, nir_ball(&b, nir_ult(&b, launch_id, launch_size)));
   {
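      /* Initialize the SW hotzone for this invocation: a zero stack pointer
       * followed by the 3D launch ID.
       */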
      nir_store_global(&b, brw_nir_rt_sw_hotzone_addr(&b, devinfo), 16,
                       nir_vec4(&b, nir_imm_int(&b, 0), /* Stack ptr */
                                nir_channel(&b, launch_id, 0),
                                nir_channel(&b, launch_id, 1),
                                nir_channel(&b, launch_id, 2)),
                       0xf /* write mask */);

      brw_nir_btd_spawn(&b, raygen_bsr_addr);
   }
   nir_push_else(&b, NULL);
   {
      /* Even though these invocations aren't being used for anything, the
       * hardware allocated stack IDs for them. They need to retire them.
       */
      brw_nir_btd_retire(&b);
   }
   nir_pop_if(&b, NULL);

   nir_shader *nir = b.shader;
   nir->info.name = ralloc_strdup(nir, "RT: TraceRay trampoline");
   nir_validate_shader(nir, "in brw_nir_create_raygen_trampoline");

   struct brw_nir_compiler_opts opts = {};
   brw_preprocess_nir(compiler, nir, &opts);

   NIR_PASS_V(nir, brw_nir_lower_rt_intrinsics, devinfo);

   b = nir_builder_create(nir_shader_get_entrypoint(b.shader));
   /* brw_nir_lower_rt_intrinsics will leave us with a btd_global_arg_addr
    * intrinsic which doesn't exist in compute shaders. We also created one
    * above when we generated the BTD spawn intrinsic. Now we go through and
    * replace them with a uniform load.
    */
   nir_foreach_block(block, b.impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         if (intrin->intrinsic != nir_intrinsic_load_btd_global_arg_addr_intel)
            continue;

         b.cursor = nir_before_instr(&intrin->instr);
         nir_def *global_arg_addr =
            load_trampoline_param(&b, rt_disp_globals_addr, 1, 64);
         nir_def_replace(&intrin->def, global_arg_addr);
      }
   }

   NIR_PASS_V(nir, brw_nir_lower_cs_intrinsics, devinfo, NULL);

   brw_nir_optimize(nir, devinfo);

   return nir;
}