1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott ([email protected])
25 *
26 */
27
28 #include <assert.h>
29 #include "c11/threads.h"
30 #include "util/simple_mtx.h"
31 #include "nir.h"
32 #include "nir_xfb_info.h"
33
34 /*
35 * This file checks for invalid IR indicating a bug somewhere in the compiler.
36 */
37
38 /* Since this file is just a pile of asserts, don't bother compiling it if
39 * we're not building a debug build.
40 */
41 #ifndef NDEBUG
42
43 typedef struct {
44 void *mem_ctx;
45
46 /* the current shader being validated */
47 nir_shader *shader;
48
49 /* the current instruction being validated */
50 nir_instr *instr;
51
52 /* the current variable being validated */
53 nir_variable *var;
54
55 /* the current basic block being validated */
56 nir_block *block;
57
58 /* the current if statement being validated */
59 nir_if *if_stmt;
60
61 /* the current loop being visited */
62 nir_loop *loop;
63
64 /* whether the loop continue construct is being visited */
65 bool in_loop_continue_construct;
66
67 /* the parent of the current cf node being visited */
68 nir_cf_node *parent_node;
69
70 /* the current function implementation being validated */
71 nir_function_impl *impl;
72
73 /* Set of all blocks in the list */
74 struct set *blocks;
75
76 /* Number of tagged nir_src's. This is implicitly the cardinality of the set
77 * of pending nir_src's.
78 */
79 uint32_t nr_tagged_srcs;
80
81 /* bitset of ssa definitions we have found; used to check uniqueness */
82 BITSET_WORD *ssa_defs_found;
83
84 /* map of variable -> function implementation where it is defined or NULL
85 * if it is a global variable
86 */
87 struct hash_table *var_defs;
88
89 /* map of instruction/var/etc to failed assert string */
90 struct hash_table *errors;
91 } validate_state;
92
93 static void
94 log_error(validate_state *state, const char *cond, const char *file, int line)
95 {
96 const void *obj;
97
98 if (state->instr)
99 obj = state->instr;
100 else if (state->var)
101 obj = state->var;
102 else
103 obj = cond;
104
105 char *msg = ralloc_asprintf(state->errors, "error: %s (%s:%d)",
106 cond, file, line);
107
108 _mesa_hash_table_insert(state->errors, obj, msg);
109 }
110
111 static bool
112 validate_assert_impl(validate_state *state, bool cond, const char *str,
113 const char *file, unsigned line)
114 {
115 if (unlikely(!cond))
116 log_error(state, str, file, line);
117 return cond;
118 }
119
120 #define validate_assert(state, cond) \
121 validate_assert_impl(state, (cond), #cond, __FILE__, __LINE__)
122
123 static void
124 validate_num_components(validate_state *state, unsigned num_components)
125 {
126 validate_assert(state, nir_num_components_valid(num_components));
127 }
128
129 /* Tag used in nir_src::_parent to indicate that a source has been seen. */
130 #define SRC_TAG_SEEN (0x2)
131
132 static_assert(SRC_TAG_SEEN == (~NIR_SRC_PARENT_MASK + 1),
133 "Parent pointer tags chosen not to collide");
134
135 static void
136 tag_src(nir_src *src, validate_state *state)
137 {
138 /* A nir_src may appear only once, and only in a single SSA def's use list;
139 * we check this by tagging each nir_src's parent pointer as we go.
140 */
141 if (validate_assert(state, (src->_parent & SRC_TAG_SEEN) == 0)) {
142 src->_parent |= SRC_TAG_SEEN;
143 state->nr_tagged_srcs++;
144 }
145 }
146
147 /* Due to tagging, it's not safe to use nir_src_parent_instr during the main
148 * validate loop. This is a tagging-aware version.
149 */
150 static nir_instr *
151 src_parent_instr_safe(nir_src *src)
152 {
153 uintptr_t untagged = (src->_parent & ~SRC_TAG_SEEN);
154 assert(!(untagged & NIR_SRC_PARENT_IS_IF) && "precondition");
155 return (nir_instr *)untagged;
156 }
157
158 /*
159 * As we walk SSA defs, we mark every use as seen by tagging the parent
160 * pointer. When we later reach the corresponding source, the tag proves
161 * that the source really is in its def's use list, and we clear it again.
162 * Once the whole impl has been walked, every tag must be gone, proving
163 * that we've seen all the sources.
164 */
165 static void
166 validate_src_tag(nir_src *src, validate_state *state)
167 {
168 if (validate_assert(state, src->_parent & SRC_TAG_SEEN)) {
169 src->_parent &= ~SRC_TAG_SEEN;
170 state->nr_tagged_srcs--;
171 }
172 }
173
174 static void
175 validate_if_src(nir_src *src, validate_state *state)
176 {
177 validate_src_tag(src, state);
178 validate_assert(state, nir_src_parent_if(src) == state->if_stmt);
179 validate_assert(state, src->ssa != NULL);
180 validate_assert(state, src->ssa->num_components == 1);
181 }
182
183 static void
184 validate_src(nir_src *src, validate_state *state)
185 {
186 /* Validate the tag first, so that nir_src_parent_instr is valid */
187 validate_src_tag(src, state);
188
189 /* The source is assumed to belong to an instruction; use validate_if_src for ifs */
190 validate_assert(state, nir_src_parent_instr(src) == state->instr);
191
192 validate_assert(state, src->ssa != NULL);
193 }
194
195 static void
196 validate_sized_src(nir_src *src, validate_state *state,
197 unsigned bit_sizes, unsigned num_components)
198 {
199 validate_src(src, state);
200
201 if (bit_sizes)
202 validate_assert(state, src->ssa->bit_size & bit_sizes);
203 if (num_components)
204 validate_assert(state, src->ssa->num_components == num_components);
205 }
206
207 static void
208 validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state)
209 {
210 nir_alu_src *src = &instr->src[index];
211
212 unsigned num_instr_channels = nir_ssa_alu_instr_src_components(instr, index);
213 unsigned num_components = nir_src_num_components(src->src);
214
215 for (unsigned i = 0; i < num_instr_channels; i++) {
216 validate_assert(state, src->swizzle[i] < num_components);
217 }
218
219 validate_src(&src->src, state);
220 }
221
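/* Validate an SSA def: its index must be in range and unique within the
 * impl, it must point back at the instruction being validated, and every
 * entry in its use list is tagged so the matching source can be checked
 * later by validate_src_tag().
 */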
222 static void
223 validate_def(nir_def *def, validate_state *state)
224 {
225 validate_assert(state, def->index < state->impl->ssa_alloc);
226 validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
227 BITSET_SET(state->ssa_defs_found, def->index);
228
229 validate_assert(state, def->parent_instr == state->instr);
230 validate_num_components(state, def->num_components);
231
232 list_validate(&def->uses);
233 nir_foreach_use_including_if(src, def) {
234 /* Check that the def matches. */
235 validate_assert(state, src->ssa == def);
236
237 /* Check that nir_src's are unique */
238 tag_src(src, state);
239 }
240 }
241
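/* ALU sources and destinations with an explicitly sized nir_alu_type must
 * match that size exactly; all unsized sources and an unsized destination
 * must agree on a single bit size.
 */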
242 static void
243 validate_alu_instr(nir_alu_instr *instr, validate_state *state)
244 {
245 validate_assert(state, instr->op < nir_num_opcodes);
246
247 unsigned instr_bit_size = 0;
248 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
249 nir_alu_type src_type = nir_op_infos[instr->op].input_types[i];
250 unsigned src_bit_size = nir_src_bit_size(instr->src[i].src);
251 if (nir_alu_type_get_type_size(src_type)) {
252 validate_assert(state, src_bit_size == nir_alu_type_get_type_size(src_type));
253 } else if (instr_bit_size) {
254 validate_assert(state, src_bit_size == instr_bit_size);
255 } else {
256 instr_bit_size = src_bit_size;
257 }
258
259 if (nir_alu_type_get_base_type(src_type) == nir_type_float) {
260 /* 8-bit float isn't a thing */
261 validate_assert(state, src_bit_size == 16 || src_bit_size == 32 ||
262 src_bit_size == 64);
263 }
264
265 /* In nir_opcodes.py, these are defined to take general uint or int
266 * sources. However, they're really only defined for 32-bit or 64-bit
267 * sources. This seems to be the only place to enforce this
268 * restriction.
269 */
270 switch (instr->op) {
271 case nir_op_ufind_msb:
272 case nir_op_ufind_msb_rev:
273 validate_assert(state, src_bit_size == 32 || src_bit_size == 64);
274 break;
275
276 default:
277 break;
278 }
279
280 validate_alu_src(instr, i, state);
281 }
282
283 nir_alu_type dest_type = nir_op_infos[instr->op].output_type;
284 unsigned dest_bit_size = instr->def.bit_size;
285 if (nir_alu_type_get_type_size(dest_type)) {
286 validate_assert(state, dest_bit_size == nir_alu_type_get_type_size(dest_type));
287 } else if (instr_bit_size) {
288 validate_assert(state, dest_bit_size == instr_bit_size);
289 } else {
290 /* The only unsized thing is the destination so it's vacuously valid */
291 }
292
293 if (nir_alu_type_get_base_type(dest_type) == nir_type_float) {
294 /* 8-bit float isn't a thing */
295 validate_assert(state, dest_bit_size == 16 || dest_bit_size == 32 ||
296 dest_bit_size == 64);
297 }
298
299 validate_def(&instr->def, state);
300 }
301
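/* Every variable referenced from a deref must have been declared; a
 * function_temp variable must additionally belong to the impl currently
 * being validated.
 */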
302 static void
303 validate_var_use(nir_variable *var, validate_state *state)
304 {
305 struct hash_entry *entry = _mesa_hash_table_search(state->var_defs, var);
306 validate_assert(state, entry);
307 if (entry && var->data.mode == nir_var_function_temp)
308 validate_assert(state, (nir_function_impl *)entry->data == state->impl);
309 }
310
311 static void
312 validate_deref_instr(nir_deref_instr *instr, validate_state *state)
313 {
314 if (instr->deref_type == nir_deref_type_var) {
315 /* Variable dereferences are stupid simple. */
316 validate_assert(state, instr->modes == instr->var->data.mode);
317 validate_assert(state, instr->type == instr->var->type);
318 validate_var_use(instr->var, state);
319 } else if (instr->deref_type == nir_deref_type_cast) {
320 /* For cast, we simply have to trust the instruction. It's up to
321 * lowering passes and front/back-ends to make them sane.
322 */
323 validate_src(&instr->parent, state);
324
325 /* Most variable modes in NIR can only exist by themselves. */
326 if (instr->modes & ~nir_var_mem_generic)
327 validate_assert(state, util_bitcount(instr->modes) == 1);
328
329 nir_deref_instr *parent = nir_src_as_deref(instr->parent);
330 if (parent) {
331 /* Casts can change the mode, but they can't change it completely. The new
332 * mode must have some bits in common with the old.
333 */
334 validate_assert(state, instr->modes & parent->modes);
335 } else {
336 /* If our parent isn't a deref, just assert the mode is there */
337 validate_assert(state, instr->modes != 0);
338 }
339
340 /* We just validate that the type is there */
341 validate_assert(state, instr->type);
342 if (instr->cast.align_mul > 0) {
343 validate_assert(state, util_is_power_of_two_nonzero(instr->cast.align_mul));
344 validate_assert(state, instr->cast.align_offset < instr->cast.align_mul);
345 } else {
346 validate_assert(state, instr->cast.align_offset == 0);
347 }
348 } else {
349 /* The parent pointer value must have the same number of components
350 * as the destination.
351 */
352 validate_sized_src(&instr->parent, state, instr->def.bit_size,
353 instr->def.num_components);
354
355 nir_instr *parent_instr = instr->parent.ssa->parent_instr;
356
357 /* The parent must come from another deref instruction */
358 validate_assert(state, parent_instr->type == nir_instr_type_deref);
359
360 nir_deref_instr *parent = nir_instr_as_deref(parent_instr);
361
362 validate_assert(state, instr->modes == parent->modes);
363
364 switch (instr->deref_type) {
365 case nir_deref_type_struct:
366 validate_assert(state, glsl_type_is_struct_or_ifc(parent->type));
367 validate_assert(state,
368 instr->strct.index < glsl_get_length(parent->type));
369 validate_assert(state, instr->type ==
370 glsl_get_struct_field(parent->type, instr->strct.index));
371 break;
372
373 case nir_deref_type_array:
374 case nir_deref_type_array_wildcard:
375 if (instr->modes & nir_var_vec_indexable_modes) {
376 /* Shared variables and UBO/SSBOs have a bit more relaxed rules
377 * because we need to be able to handle array derefs on vectors.
378 * Fortunately, nir_lower_io handles these just fine.
379 */
380 validate_assert(state, glsl_type_is_array(parent->type) ||
381 glsl_type_is_matrix(parent->type) ||
382 glsl_type_is_vector(parent->type));
383 } else {
384 /* Most of NIR cannot handle array derefs on vectors */
385 validate_assert(state, glsl_type_is_array(parent->type) ||
386 glsl_type_is_matrix(parent->type));
387 }
388 validate_assert(state,
389 instr->type == glsl_get_array_element(parent->type));
390
391 if (instr->deref_type == nir_deref_type_array) {
392 validate_sized_src(&instr->arr.index, state,
393 instr->def.bit_size, 1);
394 }
395 break;
396
397 case nir_deref_type_ptr_as_array:
398 /* ptr_as_array derefs must have a parent that is either an array,
399 * ptr_as_array, or cast. If the parent is a cast, we get the stride
400 * information (if any) from the cast deref.
401 */
402 validate_assert(state,
403 parent->deref_type == nir_deref_type_array ||
404 parent->deref_type == nir_deref_type_ptr_as_array ||
405 parent->deref_type == nir_deref_type_cast);
406 validate_sized_src(&instr->arr.index, state,
407 instr->def.bit_size, 1);
408 break;
409
410 default:
411 unreachable("Invalid deref instruction type");
412 }
413 }
414
415 /* We intentionally don't validate the size of the destination because we
416 * want to let other compiler components such as SPIR-V decide how big
417 * pointers should be.
418 */
419 validate_def(&instr->def, state);
420
421 /* Certain modes cannot be used as sources for phi instructions because
422 * way too many passes assume that they can always chase deref chains.
423 */
424 nir_foreach_use_including_if(use, &instr->def) {
425 /* Deref instructions as if conditions don't make sense because if
426 * conditions expect well-formed Booleans. If you want to compare with
427 * NULL, an explicit comparison operation should be used.
428 */
429 if (!validate_assert(state, !nir_src_is_if(use)))
430 continue;
431
432 if (src_parent_instr_safe(use)->type == nir_instr_type_phi) {
433 validate_assert(state, !(instr->modes & (nir_var_shader_in |
434 nir_var_shader_out |
436 nir_var_uniform)));
437 }
438 }
439 }
440
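/* Returns true if the intrinsic has an unsized (vectorized) destination or
 * source, i.e. at least one component count comes from instr->num_components
 * instead of the nir_intrinsic_infos table.
 */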
441 static bool
442 vectorized_intrinsic(nir_intrinsic_instr *intr)
443 {
444 const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
445
446 if (info->dest_components == 0)
447 return true;
448
449 for (unsigned i = 0; i < info->num_srcs; i++)
450 if (info->src_components[i] == 0)
451 return true;
452
453 return false;
454 }
455
456 /** Returns the image format or PIPE_FORMAT_COUNT for incomplete derefs
457 *
458 * We use PIPE_FORMAT_COUNT for incomplete derefs because PIPE_FORMAT_NONE
459 * indicates that we found the variable but it has no format specified.
460 */
461 static enum pipe_format
462 image_intrin_format(nir_intrinsic_instr *instr)
463 {
464 if (nir_intrinsic_format(instr) != PIPE_FORMAT_NONE)
465 return nir_intrinsic_format(instr);
466
467 /* If this is not a deref intrinsic, PIPE_FORMAT_NONE is the best we can do */
468 if (nir_intrinsic_infos[instr->intrinsic].src_components[0] != -1)
469 return PIPE_FORMAT_NONE;
470
471 nir_variable *var = nir_intrinsic_get_var(instr, 0);
472 if (var == NULL)
473 return PIPE_FORMAT_COUNT;
474
475 return var->data.image.format;
476 }
477
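/* Register handles must be SSA defs produced by a decl_reg intrinsic whose
 * declared component count and bit size match the load_reg/store_reg access.
 */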
478 static void
479 validate_register_handle(nir_src handle_src,
480 unsigned num_components,
481 unsigned bit_size,
482 validate_state *state)
483 {
484 nir_def *handle = handle_src.ssa;
485 nir_instr *parent = handle->parent_instr;
486
487 if (!validate_assert(state, parent->type == nir_instr_type_intrinsic))
488 return;
489
490 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(parent);
491 if (!validate_assert(state, intr->intrinsic == nir_intrinsic_decl_reg))
492 return;
493
494 validate_assert(state, nir_intrinsic_num_components(intr) == num_components);
495 validate_assert(state, nir_intrinsic_bit_size(intr) == bit_size);
496 }
497
498 static void
499 validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
500 {
501 unsigned dest_bit_size = 0;
502 unsigned src_bit_sizes[NIR_INTRINSIC_MAX_INPUTS] = {
503 0,
504 };
505 switch (instr->intrinsic) {
506 case nir_intrinsic_decl_reg:
507 assert(state->block == nir_start_block(state->impl));
508 break;
509
510 case nir_intrinsic_load_reg:
511 case nir_intrinsic_load_reg_indirect:
512 validate_register_handle(instr->src[0],
513 instr->def.num_components,
514 instr->def.bit_size, state);
515 break;
516
517 case nir_intrinsic_store_reg:
518 case nir_intrinsic_store_reg_indirect:
519 validate_register_handle(instr->src[1],
520 nir_src_num_components(instr->src[0]),
521 nir_src_bit_size(instr->src[0]), state);
522 break;
523
524 case nir_intrinsic_convert_alu_types: {
525 nir_alu_type src_type = nir_intrinsic_src_type(instr);
526 nir_alu_type dest_type = nir_intrinsic_dest_type(instr);
527 dest_bit_size = nir_alu_type_get_type_size(dest_type);
528 src_bit_sizes[0] = nir_alu_type_get_type_size(src_type);
529 validate_assert(state, dest_bit_size != 0);
530 validate_assert(state, src_bit_sizes[0] != 0);
531 break;
532 }
533
534 case nir_intrinsic_load_param: {
535 unsigned param_idx = nir_intrinsic_param_idx(instr);
536 validate_assert(state, param_idx < state->impl->function->num_params);
537 nir_parameter *param = &state->impl->function->params[param_idx];
538 validate_assert(state, instr->num_components == param->num_components);
539 dest_bit_size = param->bit_size;
540 break;
541 }
542
543 case nir_intrinsic_load_deref: {
544 nir_deref_instr *src = nir_src_as_deref(instr->src[0]);
545 assert(src);
546 validate_assert(state, glsl_type_is_vector_or_scalar(src->type) ||
547 (src->modes == nir_var_uniform &&
548 glsl_get_base_type(src->type) == GLSL_TYPE_SUBROUTINE));
549 validate_assert(state, instr->num_components ==
550 glsl_get_vector_elements(src->type));
551 dest_bit_size = glsl_get_bit_size(src->type);
552 /* Also allow 32-bit boolean load operations */
553 if (glsl_type_is_boolean(src->type))
554 dest_bit_size |= 32;
555 break;
556 }
557
558 case nir_intrinsic_store_deref: {
559 nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
560 assert(dst);
561 validate_assert(state, glsl_type_is_vector_or_scalar(dst->type));
562 validate_assert(state, instr->num_components ==
563 glsl_get_vector_elements(dst->type));
564 src_bit_sizes[1] = glsl_get_bit_size(dst->type);
565 /* Also allow 32-bit boolean store operations */
566 if (glsl_type_is_boolean(dst->type))
567 src_bit_sizes[1] |= 32;
568 validate_assert(state, !nir_deref_mode_may_be(dst, nir_var_read_only_modes));
569 break;
570 }
571
572 case nir_intrinsic_copy_deref: {
573 nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
574 nir_deref_instr *src = nir_src_as_deref(instr->src[1]);
575 validate_assert(state, glsl_get_bare_type(dst->type) ==
576 glsl_get_bare_type(src->type));
577 validate_assert(state, !nir_deref_mode_may_be(dst, nir_var_read_only_modes));
578 /* FIXME: now that we track if the var copies were lowered, it would be
579 * good to validate here that no new copy derefs were added. Right now
580 * we can't as there are some specific cases where copies are added even
581 * after the lowering. One example is the Intel compiler, that calls
582 * nir_lower_io_to_temporaries when linking some shader stages.
583 */
584 break;
585 }
586
587 case nir_intrinsic_load_ubo_vec4: {
588 int bit_size = instr->def.bit_size;
589 validate_assert(state, bit_size >= 8);
590 validate_assert(state, (nir_intrinsic_component(instr) +
591 instr->num_components) *
592 (bit_size / 8) <=
593 16);
594 break;
595 }
596
597 case nir_intrinsic_load_ubo:
598 /* Make sure that the creator didn't forget to set the range_base+range. */
599 validate_assert(state, nir_intrinsic_range(instr) != 0);
600 FALLTHROUGH;
601 case nir_intrinsic_load_ssbo:
602 case nir_intrinsic_load_shared:
603 case nir_intrinsic_load_global:
604 case nir_intrinsic_load_global_constant:
605 case nir_intrinsic_load_scratch:
606 case nir_intrinsic_load_constant:
607 /* These memory load operations must have alignments */
608 validate_assert(state,
609 util_is_power_of_two_nonzero(nir_intrinsic_align_mul(instr)));
610 validate_assert(state, nir_intrinsic_align_offset(instr) <
611 nir_intrinsic_align_mul(instr));
612 FALLTHROUGH;
613
614 case nir_intrinsic_load_uniform:
615 case nir_intrinsic_load_input:
616 case nir_intrinsic_load_per_primitive_input:
617 case nir_intrinsic_load_per_vertex_input:
618 case nir_intrinsic_load_interpolated_input:
619 case nir_intrinsic_load_output:
620 case nir_intrinsic_load_per_vertex_output:
621 case nir_intrinsic_load_per_primitive_output:
622 case nir_intrinsic_load_push_constant:
623 /* All memory load operations must load at least a byte */
624 validate_assert(state, instr->def.bit_size >= 8);
625 break;
626
627 case nir_intrinsic_load_barycentric_pixel:
628 case nir_intrinsic_load_barycentric_centroid:
629 case nir_intrinsic_load_barycentric_sample:
630 case nir_intrinsic_load_barycentric_at_offset:
631 case nir_intrinsic_load_barycentric_at_sample: {
632 enum glsl_interp_mode mode = nir_intrinsic_interp_mode(instr);
633 validate_assert(state,
634 mode == INTERP_MODE_NONE ||
635 mode == INTERP_MODE_SMOOTH ||
636 mode == INTERP_MODE_NOPERSPECTIVE);
637 break;
638 }
639
640 case nir_intrinsic_store_ssbo:
641 case nir_intrinsic_store_shared:
642 case nir_intrinsic_store_global:
643 case nir_intrinsic_store_scratch:
644 /* These memory store operations must also have alignments */
645 validate_assert(state,
646 util_is_power_of_two_nonzero(nir_intrinsic_align_mul(instr)));
647 validate_assert(state, nir_intrinsic_align_offset(instr) <
648 nir_intrinsic_align_mul(instr));
649 /* All memory store operations must store at least a byte */
650 validate_assert(state, nir_src_bit_size(instr->src[0]) >= 8);
651 break;
652
653 case nir_intrinsic_store_output:
654 case nir_intrinsic_store_per_vertex_output:
655 if (state->shader->info.stage == MESA_SHADER_FRAGMENT)
656 validate_assert(state, nir_src_bit_size(instr->src[0]) >= 8);
657 else
658 validate_assert(state, nir_src_bit_size(instr->src[0]) >= 16);
659 validate_assert(state,
660 nir_src_bit_size(instr->src[0]) ==
661 nir_alu_type_get_type_size(nir_intrinsic_src_type(instr)));
662 break;
663
664 case nir_intrinsic_deref_mode_is:
665 case nir_intrinsic_addr_mode_is:
666 validate_assert(state,
667 util_bitcount(nir_intrinsic_memory_modes(instr)) == 1);
668 break;
669
670 case nir_intrinsic_image_deref_atomic:
671 case nir_intrinsic_image_deref_atomic_swap:
672 case nir_intrinsic_bindless_image_atomic:
673 case nir_intrinsic_bindless_image_atomic_swap:
674 case nir_intrinsic_image_atomic:
675 case nir_intrinsic_image_atomic_swap: {
676 nir_atomic_op op = nir_intrinsic_atomic_op(instr);
677
678 enum pipe_format format = image_intrin_format(instr);
679 if (format != PIPE_FORMAT_COUNT) {
680 bool allowed = false;
681 bool is_float = (nir_atomic_op_type(op) == nir_type_float);
682
683 switch (format) {
684 case PIPE_FORMAT_R32_FLOAT:
685 allowed = is_float || op == nir_atomic_op_xchg;
686 break;
687 case PIPE_FORMAT_R16_FLOAT:
688 case PIPE_FORMAT_R64_FLOAT:
689 allowed = op == nir_atomic_op_fmin || op == nir_atomic_op_fmax;
690 break;
691 case PIPE_FORMAT_R32_UINT:
692 case PIPE_FORMAT_R32_SINT:
693 case PIPE_FORMAT_R64_UINT:
694 case PIPE_FORMAT_R64_SINT:
695 allowed = !is_float;
696 break;
697 default:
698 break;
699 }
700
701 validate_assert(state, allowed);
702 validate_assert(state, instr->def.bit_size ==
703 util_format_get_blocksizebits(format));
704 }
705 break;
706 }
707
708 case nir_intrinsic_store_buffer_amd:
709 if (nir_intrinsic_access(instr) & ACCESS_USES_FORMAT_AMD) {
710 unsigned writemask = nir_intrinsic_write_mask(instr);
711
712 /* Make sure the writemask is derived from the component count. */
713 validate_assert(state,
714 writemask ==
715 BITFIELD_MASK(nir_src_num_components(instr->src[0])));
716 }
717 break;
718
719 default:
720 break;
721 }
722
723 if (instr->num_components > 0)
724 validate_num_components(state, instr->num_components);
725
726 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
727 unsigned num_srcs = info->num_srcs;
728 for (unsigned i = 0; i < num_srcs; i++) {
729 unsigned components_read = nir_intrinsic_src_components(instr, i);
730
731 validate_num_components(state, components_read);
732
733 validate_sized_src(&instr->src[i], state, src_bit_sizes[i], components_read);
734 }
735
736 if (nir_intrinsic_infos[instr->intrinsic].has_dest) {
737 unsigned components_written = nir_intrinsic_dest_components(instr);
738 unsigned bit_sizes = info->dest_bit_sizes;
739 if (!bit_sizes && info->bit_size_src >= 0)
740 bit_sizes = nir_src_bit_size(instr->src[info->bit_size_src]);
741
742 validate_num_components(state, components_written);
743 if (dest_bit_size && bit_sizes)
744 validate_assert(state, dest_bit_size & bit_sizes);
745 else
746 dest_bit_size = dest_bit_size ? dest_bit_size : bit_sizes;
747
748 validate_def(&instr->def, state);
749 validate_assert(state, instr->def.num_components == components_written);
750
751 if (dest_bit_size)
752 validate_assert(state, instr->def.bit_size & dest_bit_size);
753 }
754
755 if (!vectorized_intrinsic(instr))
756 validate_assert(state, instr->num_components == 0);
757
758 if (nir_intrinsic_has_write_mask(instr)) {
759 unsigned component_mask = BITFIELD_MASK(instr->num_components);
760 validate_assert(state, (nir_intrinsic_write_mask(instr) & ~component_mask) == 0);
761 }
762
763 if (nir_intrinsic_has_io_xfb(instr)) {
764 unsigned used_mask = 0;
765
766 for (unsigned i = 0; i < 4; i++) {
767 nir_io_xfb xfb = i < 2 ? nir_intrinsic_io_xfb(instr) : nir_intrinsic_io_xfb2(instr);
768 unsigned xfb_mask = BITFIELD_RANGE(i, xfb.out[i % 2].num_components);
769
770 /* Each component can be used only once by transform feedback info. */
771 validate_assert(state, (xfb_mask & used_mask) == 0);
772 used_mask |= xfb_mask;
773 }
774 }
775
776 if (nir_intrinsic_has_io_semantics(instr) &&
777 !nir_intrinsic_infos[instr->intrinsic].has_dest) {
778 nir_io_semantics sem = nir_intrinsic_io_semantics(instr);
779
780 /* An output that has no effect shouldn't be present in the IR. */
781 validate_assert(state,
782 (nir_slot_is_sysval_output(sem.location, MESA_SHADER_NONE) &&
783 !sem.no_sysval_output) ||
784 (nir_slot_is_varying(sem.location) && !sem.no_varying) ||
785 nir_instr_xfb_write_mask(instr) ||
786 /* TCS can set no_varying and no_sysval_output, meaning
787 * that the output is only read by TCS and not TES.
788 */
789 state->shader->info.stage == MESA_SHADER_TESS_CTRL);
790 validate_assert(state,
791 (!sem.dual_source_blend_index &&
792 !sem.fb_fetch_output) ||
793 state->shader->info.stage == MESA_SHADER_FRAGMENT);
794 validate_assert(state,
795 !sem.gs_streams ||
796 state->shader->info.stage == MESA_SHADER_GEOMETRY);
797 validate_assert(state,
798 !sem.high_dvec2 ||
799 (state->shader->info.stage == MESA_SHADER_VERTEX &&
800 instr->intrinsic == nir_intrinsic_load_input));
801 validate_assert(state,
802 !sem.interp_explicit_strict ||
803 (state->shader->info.stage == MESA_SHADER_FRAGMENT &&
804 instr->intrinsic == nir_intrinsic_load_input_vertex));
805 }
806 }
807
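/* A texture/image deref source must have an image, texture or sampler type,
 * and instr->dest_type must be consistent with both the texture op and the
 * sampler's result type.
 */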
808 static void
809 validate_tex_src_texture_deref(nir_tex_instr *instr, validate_state *state,
810 nir_deref_instr *deref)
811 {
812 validate_assert(state, glsl_type_is_image(deref->type) ||
813 glsl_type_is_texture(deref->type) ||
814 glsl_type_is_sampler(deref->type));
815
816 switch (instr->op) {
817 case nir_texop_descriptor_amd:
818 case nir_texop_sampler_descriptor_amd:
819 case nir_texop_custom_border_color_agx:
820 break;
821 case nir_texop_lod:
822 case nir_texop_lod_bias_agx:
823 validate_assert(state, nir_alu_type_get_base_type(instr->dest_type) == nir_type_float);
824 break;
825 case nir_texop_samples_identical:
826 case nir_texop_has_custom_border_color_agx:
827 validate_assert(state, nir_alu_type_get_base_type(instr->dest_type) == nir_type_bool);
828 break;
829 case nir_texop_txs:
830 case nir_texop_texture_samples:
831 case nir_texop_query_levels:
832 case nir_texop_fragment_mask_fetch_amd:
833 case nir_texop_txf_ms_mcs_intel:
834 validate_assert(state, nir_alu_type_get_base_type(instr->dest_type) == nir_type_int ||
835 nir_alu_type_get_base_type(instr->dest_type) == nir_type_uint);
836 break;
837 default:
838 validate_assert(state,
839 glsl_get_sampler_result_type(deref->type) == GLSL_TYPE_VOID ||
840 glsl_base_type_is_integer(glsl_get_sampler_result_type(deref->type)) ==
841 (nir_alu_type_get_base_type(instr->dest_type) == nir_type_int ||
842 nir_alu_type_get_base_type(instr->dest_type) == nir_type_uint));
843 }
844 }
845
846 static void
847 validate_tex_instr(nir_tex_instr *instr, validate_state *state)
848 {
849 bool src_type_seen[nir_num_tex_src_types];
850 for (unsigned i = 0; i < nir_num_tex_src_types; i++)
851 src_type_seen[i] = false;
852
853 for (unsigned i = 0; i < instr->num_srcs; i++) {
854 validate_assert(state, !src_type_seen[instr->src[i].src_type]);
855 src_type_seen[instr->src[i].src_type] = true;
856 validate_sized_src(&instr->src[i].src, state,
857 0, nir_tex_instr_src_size(instr, i));
858
859 switch (instr->src[i].src_type) {
860
861 case nir_tex_src_comparator:
862 validate_assert(state, instr->is_shadow);
863 break;
864
865 case nir_tex_src_bias:
866 validate_assert(state, instr->op == nir_texop_txb ||
867 instr->op == nir_texop_tg4 ||
868 instr->op == nir_texop_lod);
869 break;
870
871 case nir_tex_src_lod:
872 validate_assert(state, instr->op != nir_texop_tex &&
873 instr->op != nir_texop_txb &&
874 instr->op != nir_texop_txd &&
875 instr->op != nir_texop_lod);
876 break;
877
878 case nir_tex_src_ddx:
879 case nir_tex_src_ddy:
880 validate_assert(state, instr->op == nir_texop_txd);
881 break;
882
883 case nir_tex_src_texture_deref: {
884 nir_deref_instr *deref = nir_src_as_deref(instr->src[i].src);
885 if (!validate_assert(state, deref))
886 break;
887
888 validate_tex_src_texture_deref(instr, state, deref);
889 break;
890 }
891
892 case nir_tex_src_sampler_deref: {
893 nir_deref_instr *deref = nir_src_as_deref(instr->src[i].src);
894 if (!validate_assert(state, deref))
895 break;
896
897 validate_assert(state, glsl_type_is_sampler(deref->type));
898 break;
899 }
900
901 case nir_tex_src_sampler_deref_intrinsic:
902 case nir_tex_src_texture_deref_intrinsic: {
903 nir_intrinsic_instr *intrin =
904 nir_instr_as_intrinsic(instr->src[i].src.ssa->parent_instr);
905 nir_deref_instr *deref =
906 nir_instr_as_deref(intrin->src[0].ssa->parent_instr);
907 if (!validate_assert(state, deref))
908 break;
909
910 if (instr->src[i].src_type == nir_tex_src_sampler_deref_intrinsic)
911 validate_assert(state, glsl_type_is_sampler(deref->type));
912 else
913 validate_tex_src_texture_deref(instr, state, deref);
914
915 break;
916 }
917
918 case nir_tex_src_coord:
919 case nir_tex_src_projector:
920 case nir_tex_src_offset:
921 case nir_tex_src_min_lod:
922 case nir_tex_src_ms_index:
923 case nir_tex_src_texture_offset:
924 case nir_tex_src_sampler_offset:
925 case nir_tex_src_plane:
926 case nir_tex_src_texture_handle:
927 case nir_tex_src_sampler_handle:
928 break;
929
930 default:
931 break;
932 }
933 }
934
935 bool msaa = (instr->sampler_dim == GLSL_SAMPLER_DIM_MS ||
936 instr->sampler_dim == GLSL_SAMPLER_DIM_SUBPASS_MS);
937
938 if (msaa)
939 validate_assert(state, instr->op != nir_texop_txf);
940 else
941 validate_assert(state, instr->op != nir_texop_txf_ms);
942
943 if (instr->op != nir_texop_tg4)
944 validate_assert(state, instr->component == 0);
945
946 if (nir_tex_instr_has_explicit_tg4_offsets(instr)) {
947 validate_assert(state, instr->op == nir_texop_tg4);
948 validate_assert(state, !src_type_seen[nir_tex_src_offset]);
949 }
950
951 if (instr->is_gather_implicit_lod)
952 validate_assert(state, instr->op == nir_texop_tg4);
953
954 validate_def(&instr->def, state);
955 validate_assert(state, instr->def.num_components ==
956 nir_tex_instr_dest_size(instr));
957
958 unsigned bit_size = nir_alu_type_get_type_size(instr->dest_type);
959 validate_assert(state,
960 (bit_size ? bit_size : 32) ==
961 instr->def.bit_size);
962 }
963
964 static void
965 validate_call_instr(nir_call_instr *instr, validate_state *state)
966 {
967 validate_assert(state, instr->num_params == instr->callee->num_params);
968
969 for (unsigned i = 0; i < instr->num_params; i++) {
970 validate_sized_src(&instr->params[i], state,
971 instr->callee->params[i].bit_size,
972 instr->callee->params[i].num_components);
973 }
974 }
975
976 static void
977 validate_const_value(nir_const_value *val, unsigned bit_size,
978 bool is_null_constant, validate_state *state)
979 {
980 /* In order for block copies to work properly for things like instruction
981 * comparisons and [de]serialization, we require the unused bits of the
982 * nir_const_value to be zero.
983 */
984 nir_const_value cmp_val;
985 memset(&cmp_val, 0, sizeof(cmp_val));
986 if (!is_null_constant) {
987 switch (bit_size) {
988 case 1:
989 cmp_val.b = val->b;
990 break;
991 case 8:
992 cmp_val.u8 = val->u8;
993 break;
994 case 16:
995 cmp_val.u16 = val->u16;
996 break;
997 case 32:
998 cmp_val.u32 = val->u32;
999 break;
1000 case 64:
1001 cmp_val.u64 = val->u64;
1002 break;
1003 default:
1004 validate_assert(state, !"Invalid load_const bit size");
1005 }
1006 }
1007 validate_assert(state, memcmp(val, &cmp_val, sizeof(cmp_val)) == 0);
1008 }
1009
1010 static void
1011 validate_load_const_instr(nir_load_const_instr *instr, validate_state *state)
1012 {
1013 validate_def(&instr->def, state);
1014
1015 for (unsigned i = 0; i < instr->def.num_components; i++)
1016 validate_const_value(&instr->value[i], instr->def.bit_size, false, state);
1017 }
1018
1019 static void
1020 validate_ssa_undef_instr(nir_undef_instr *instr, validate_state *state)
1021 {
1022 validate_def(&instr->def, state);
1023 }
1024
1025 static void
1026 validate_phi_instr(nir_phi_instr *instr, validate_state *state)
1027 {
1028 /*
1029 * don't validate the sources until we get to them from their predecessor
1030 * basic blocks, to avoid validating an SSA use before its definition.
1031 */
1032
1033 validate_def(&instr->def, state);
1034
1035 exec_list_validate(&instr->srcs);
1036 validate_assert(state, exec_list_length(&instr->srcs) ==
1037 state->block->predecessors->entries);
1038 }
1039
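/* A jump must be the last instruction in its block, and its target pointers
 * must be consistent with the block's successors and with whether the impl
 * is structured.
 */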
1040 static void
1041 validate_jump_instr(nir_jump_instr *instr, validate_state *state)
1042 {
1043 nir_block *block = state->block;
1044 validate_assert(state, &instr->instr == nir_block_last_instr(block));
1045
1046 switch (instr->type) {
1047 case nir_jump_return:
1048 case nir_jump_halt:
1049 validate_assert(state, block->successors[0] == state->impl->end_block);
1050 validate_assert(state, block->successors[1] == NULL);
1051 validate_assert(state, instr->target == NULL);
1052 validate_assert(state, instr->else_target == NULL);
1053 validate_assert(state, !state->in_loop_continue_construct);
1054 break;
1055
1056 case nir_jump_break:
1057 validate_assert(state, state->impl->structured);
1058 validate_assert(state, state->loop != NULL);
1059 if (state->loop) {
1060 nir_block *after =
1061 nir_cf_node_as_block(nir_cf_node_next(&state->loop->cf_node));
1062 validate_assert(state, block->successors[0] == after);
1063 }
1064 validate_assert(state, block->successors[1] == NULL);
1065 validate_assert(state, instr->target == NULL);
1066 validate_assert(state, instr->else_target == NULL);
1067 break;
1068
1069 case nir_jump_continue:
1070 validate_assert(state, state->impl->structured);
1071 validate_assert(state, state->loop != NULL);
1072 if (state->loop) {
1073 nir_block *cont_block = nir_loop_continue_target(state->loop);
1074 validate_assert(state, block->successors[0] == cont_block);
1075 }
1076 validate_assert(state, block->successors[1] == NULL);
1077 validate_assert(state, instr->target == NULL);
1078 validate_assert(state, instr->else_target == NULL);
1079 validate_assert(state, !state->in_loop_continue_construct);
1080 break;
1081
1082 case nir_jump_goto:
1083 validate_assert(state, !state->impl->structured);
1084 validate_assert(state, instr->target == block->successors[0]);
1085 validate_assert(state, instr->target != NULL);
1086 validate_assert(state, instr->else_target == NULL);
1087 break;
1088
1089 case nir_jump_goto_if:
1090 validate_assert(state, !state->impl->structured);
1091 validate_assert(state, instr->target == block->successors[1]);
1092 validate_assert(state, instr->else_target == block->successors[0]);
1093 validate_sized_src(&instr->condition, state, 0, 1);
1094 validate_assert(state, instr->target != NULL);
1095 validate_assert(state, instr->else_target != NULL);
1096 break;
1097
1098 default:
1099 validate_assert(state, !"Invalid jump instruction type");
1100 break;
1101 }
1102 }
1103
1104 static void
1105 validate_instr(nir_instr *instr, validate_state *state)
1106 {
1107 validate_assert(state, instr->block == state->block);
1108
1109 state->instr = instr;
1110
1111 switch (instr->type) {
1112 case nir_instr_type_alu:
1113 validate_alu_instr(nir_instr_as_alu(instr), state);
1114 break;
1115
1116 case nir_instr_type_deref:
1117 validate_deref_instr(nir_instr_as_deref(instr), state);
1118 break;
1119
1120 case nir_instr_type_call:
1121 validate_call_instr(nir_instr_as_call(instr), state);
1122 break;
1123
1124 case nir_instr_type_intrinsic:
1125 validate_intrinsic_instr(nir_instr_as_intrinsic(instr), state);
1126 break;
1127
1128 case nir_instr_type_tex:
1129 validate_tex_instr(nir_instr_as_tex(instr), state);
1130 break;
1131
1132 case nir_instr_type_load_const:
1133 validate_load_const_instr(nir_instr_as_load_const(instr), state);
1134 break;
1135
1136 case nir_instr_type_phi:
1137 validate_phi_instr(nir_instr_as_phi(instr), state);
1138 break;
1139
1140 case nir_instr_type_undef:
1141 validate_ssa_undef_instr(nir_instr_as_undef(instr), state);
1142 break;
1143
1144 case nir_instr_type_jump:
1145 validate_jump_instr(nir_instr_as_jump(instr), state);
1146 break;
1147
1148 case nir_instr_type_debug_info:
1149 break;
1150
1151 default:
1152 validate_assert(state, !"Invalid ALU instruction type");
1153 break;
1154 }
1155
1156 state->instr = NULL;
1157 }
1158
1159 static void
1160 validate_phi_src(nir_phi_instr *instr, nir_block *pred, validate_state *state)
1161 {
1162 state->instr = &instr->instr;
1163
1164 exec_list_validate(&instr->srcs);
1165 nir_foreach_phi_src(src, instr) {
1166 if (src->pred == pred) {
1167 validate_sized_src(&src->src, state, instr->def.bit_size,
1168 instr->def.num_components);
1169 state->instr = NULL;
1170 return;
1171 }
1172 }
1173 validate_assert(state, !"Phi does not have a source corresponding to one "
1174 "of its predecessor blocks");
1175 }
1176
1177 static void
1178 validate_phi_srcs(nir_block *block, nir_block *succ, validate_state *state)
1179 {
1180 nir_foreach_phi(phi, succ) {
1181 validate_phi_src(phi, block, state);
1182 }
1183 }
1184
1185 static void
1186 collect_blocks(struct exec_list *cf_list, validate_state *state)
1187 {
1188 /* We walk the blocks manually here rather than using nir_foreach_block for
1189 * a few reasons:
1190 *
1191 * 1. We want to call exec_list_validate() on every linked list in the IR
1192 * which means we need to touch every linked list, and just walking blocks
1193 * with nir_foreach_block() would make that difficult. In particular,
1194 * we want to validate each list before the first time we walk it so
1195 * that we catch broken lists in exec_list_validate() instead of
1196 * getting stuck in a hard-to-debug infinite loop in the validator.
1197 *
1198 * 2. nir_foreach_block() depends on several invariants of the CF node
1199 * hierarchy which nir_validate_shader() is responsible for verifying.
1200 * If we used nir_foreach_block() in nir_validate_shader(), we could
1201 * end up blowing up on a bad list walk instead of throwing the much
1202 * easier to debug validation error.
1203 */
1204 exec_list_validate(cf_list);
1205 foreach_list_typed(nir_cf_node, node, node, cf_list) {
1206 switch (node->type) {
1207 case nir_cf_node_block:
1208 _mesa_set_add(state->blocks, nir_cf_node_as_block(node));
1209 break;
1210
1211 case nir_cf_node_if:
1212 collect_blocks(&nir_cf_node_as_if(node)->then_list, state);
1213 collect_blocks(&nir_cf_node_as_if(node)->else_list, state);
1214 break;
1215
1216 case nir_cf_node_loop:
1217 collect_blocks(&nir_cf_node_as_loop(node)->body, state);
1218 collect_blocks(&nir_cf_node_as_loop(node)->continue_list, state);
1219 break;
1220
1221 default:
1222 unreachable("Invalid CF node type");
1223 }
1224 }
1225 }
1226
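/* For unstructured NIR: depth-first walk over successors from the start
 * block, collecting every reachable block and checking that block->index
 * matches reverse post-DFS order.
 */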
1227 static void
1228 collect_blocks_pdfs(nir_function_impl *impl, nir_block *block,
1229 uint32_t *count, validate_state *state)
1230 {
1231 if (block == impl->end_block)
1232 return;
1233
1234 if (_mesa_set_search(state->blocks, block))
1235 return;
1236
1237 _mesa_set_add(state->blocks, block);
1238
1239 for (uint32_t i = 0; i < ARRAY_SIZE(block->successors); i++) {
1240 if (block->successors[i] != NULL)
1241 collect_blocks_pdfs(impl, block->successors[i], count, state);
1242 }
1243
1244 /* Assert that the blocks are indexed in reverse PDFS order */
1245 validate_assert(state, block->index == --(*count));
1246 }
1247
1248 static void
1249 collect_unstructured_blocks(nir_function_impl *impl, validate_state *state)
1250 {
1251 exec_list_validate(&impl->body);
1252
1253 /* Assert that the blocks are properly indexed */
1254 uint32_t count = 0;
1255 foreach_list_typed(nir_cf_node, node, node, &impl->body) {
1256 nir_block *block = nir_cf_node_as_block(node);
1257 validate_assert(state, block->index == count++);
1258 }
1259 validate_assert(state, impl->end_block->index == count);
1260
1261 collect_blocks_pdfs(impl, nir_start_block(impl), &count, state);
1262 }
1263
1264 static void validate_cf_node(nir_cf_node *node, validate_state *state);
1265
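/* Check successor/predecessor consistency: every successor must be a known
 * block whose predecessor set contains this block, phi sources in each
 * successor must line up, and the start block must have no predecessors.
 */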
1266 static void
1267 validate_block_predecessors(nir_block *block, validate_state *state)
1268 {
1269 for (unsigned i = 0; i < 2; i++) {
1270 if (block->successors[i] == NULL)
1271 continue;
1272
1273 /* The block has to exist in the nir_function_impl */
1274 validate_assert(state, _mesa_set_search(state->blocks,
1275 block->successors[i]));
1276
1277 /* And we have to be in our successor's predecessors set */
1278 validate_assert(state,
1279 _mesa_set_search(block->successors[i]->predecessors, block));
1280
1281 validate_phi_srcs(block, block->successors[i], state);
1282 }
1283
1284 /* The start block cannot have any predecessors */
1285 if (block == nir_start_block(state->impl))
1286 validate_assert(state, block->predecessors->entries == 0);
1287
1288 set_foreach(block->predecessors, entry) {
1289 const nir_block *pred = entry->key;
1290 validate_assert(state, _mesa_set_search(state->blocks, pred));
1291 validate_assert(state, pred->successors[0] == block ||
1292 pred->successors[1] == block);
1293 }
1294 }
1295
1296 static void
1297 validate_block(nir_block *block, validate_state *state)
1298 {
1299 validate_assert(state, block->cf_node.parent == state->parent_node);
1300
1301 state->block = block;
1302
1303 exec_list_validate(&block->instr_list);
1304 nir_foreach_instr(instr, block) {
1305 if (instr->type == nir_instr_type_phi) {
1306 validate_assert(state, instr == nir_block_first_instr(block) ||
1307 nir_instr_prev(instr)->type == nir_instr_type_phi);
1308 }
1309
1310 validate_instr(instr, state);
1311 }
1312
1313 validate_assert(state, block->successors[0] != NULL);
1314 validate_assert(state, block->successors[0] != block->successors[1]);
1315 validate_block_predecessors(block, state);
1316
1317 if (!state->impl->structured) {
1318 validate_assert(state, nir_block_ends_in_jump(block));
1319 } else if (!nir_block_ends_in_jump(block)) {
1320 nir_cf_node *next = nir_cf_node_next(&block->cf_node);
1321 if (next == NULL) {
1322 switch (state->parent_node->type) {
1323 case nir_cf_node_loop: {
1324 if (block == nir_loop_last_block(state->loop)) {
1325 nir_block *cont = nir_loop_continue_target(state->loop);
1326 validate_assert(state, block->successors[0] == cont);
1327 } else {
1328 validate_assert(state, nir_loop_has_continue_construct(state->loop) &&
1329 block == nir_loop_last_continue_block(state->loop));
1330 nir_block *head = nir_loop_first_block(state->loop);
1331 validate_assert(state, block->successors[0] == head);
1332 }
1333 /* due to the hack for infinite loops, block->successors[1] may
1334 * point to the block after the loop.
1335 */
1336 break;
1337 }
1338
1339 case nir_cf_node_if: {
1340 nir_block *after =
1341 nir_cf_node_as_block(nir_cf_node_next(state->parent_node));
1342 validate_assert(state, block->successors[0] == after);
1343 validate_assert(state, block->successors[1] == NULL);
1344 break;
1345 }
1346
1347 case nir_cf_node_function:
1348 validate_assert(state, block->successors[0] == state->impl->end_block);
1349 validate_assert(state, block->successors[1] == NULL);
1350 break;
1351
1352 default:
1353 unreachable("unknown control flow node type");
1354 }
1355 } else {
1356 if (next->type == nir_cf_node_if) {
1357 nir_if *if_stmt = nir_cf_node_as_if(next);
1358 validate_assert(state, block->successors[0] ==
1359 nir_if_first_then_block(if_stmt));
1360 validate_assert(state, block->successors[1] ==
1361 nir_if_first_else_block(if_stmt));
1362 } else if (next->type == nir_cf_node_loop) {
1363 nir_loop *loop = nir_cf_node_as_loop(next);
1364 validate_assert(state, block->successors[0] ==
1365 nir_loop_first_block(loop));
1366 validate_assert(state, block->successors[1] == NULL);
1367 } else {
1368 validate_assert(state,
1369 !"Structured NIR cannot have consecutive blocks");
1370 }
1371 }
1372 }
1373 }
1374
1375 static void
1376 validate_end_block(nir_block *block, validate_state *state)
1377 {
1378 validate_assert(state, block->cf_node.parent == &state->impl->cf_node);
1379
1380 exec_list_validate(&block->instr_list);
1381 validate_assert(state, exec_list_is_empty(&block->instr_list));
1382
1383 validate_assert(state, block->successors[0] == NULL);
1384 validate_assert(state, block->successors[1] == NULL);
1385 validate_block_predecessors(block, state);
1386 }
1387
1388 static void
1389 validate_if(nir_if *if_stmt, validate_state *state)
1390 {
1391 validate_assert(state, state->impl->structured);
1392
1393 state->if_stmt = if_stmt;
1394
1395 validate_assert(state, !exec_node_is_head_sentinel(if_stmt->cf_node.node.prev));
1396 nir_cf_node *prev_node = nir_cf_node_prev(&if_stmt->cf_node);
1397 validate_assert(state, prev_node->type == nir_cf_node_block);
1398
1399 validate_assert(state, !exec_node_is_tail_sentinel(if_stmt->cf_node.node.next));
1400 nir_cf_node *next_node = nir_cf_node_next(&if_stmt->cf_node);
1401 validate_assert(state, next_node->type == nir_cf_node_block);
1402
1403 validate_assert(state, nir_src_is_if(&if_stmt->condition));
1404 validate_if_src(&if_stmt->condition, state);
1405
1406 validate_assert(state, !exec_list_is_empty(&if_stmt->then_list));
1407 validate_assert(state, !exec_list_is_empty(&if_stmt->else_list));
1408
1409 nir_cf_node *old_parent = state->parent_node;
1410 state->parent_node = &if_stmt->cf_node;
1411
1412 foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->then_list) {
1413 validate_cf_node(cf_node, state);
1414 }
1415
1416 foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->else_list) {
1417 validate_cf_node(cf_node, state);
1418 }
1419
1420 state->parent_node = old_parent;
1421 state->if_stmt = NULL;
1422 }
1423
1424 static void
1425 validate_loop(nir_loop *loop, validate_state *state)
1426 {
1427 validate_assert(state, state->impl->structured);
1428
1429 validate_assert(state, !exec_node_is_head_sentinel(loop->cf_node.node.prev));
1430 nir_cf_node *prev_node = nir_cf_node_prev(&loop->cf_node);
1431 validate_assert(state, prev_node->type == nir_cf_node_block);
1432
1433 validate_assert(state, !exec_node_is_tail_sentinel(loop->cf_node.node.next));
1434 nir_cf_node *next_node = nir_cf_node_next(&loop->cf_node);
1435 validate_assert(state, next_node->type == nir_cf_node_block);
1436
1437 validate_assert(state, !exec_list_is_empty(&loop->body));
1438
1439 nir_cf_node *old_parent = state->parent_node;
1440 state->parent_node = &loop->cf_node;
1441 nir_loop *old_loop = state->loop;
1442 bool old_continue_construct = state->in_loop_continue_construct;
1443 state->loop = loop;
1444 state->in_loop_continue_construct = false;
1445
1446 foreach_list_typed(nir_cf_node, cf_node, node, &loop->body) {
1447 validate_cf_node(cf_node, state);
1448 }
1449 state->in_loop_continue_construct = true;
1450 foreach_list_typed(nir_cf_node, cf_node, node, &loop->continue_list) {
1451 validate_cf_node(cf_node, state);
1452 }
1453 state->in_loop_continue_construct = false;
1454 state->parent_node = old_parent;
1455 state->loop = old_loop;
1456 state->in_loop_continue_construct = old_continue_construct;
1457 }
1458
1459 static void
1460 validate_cf_node(nir_cf_node *node, validate_state *state)
1461 {
1462 validate_assert(state, node->parent == state->parent_node);
1463
1464 switch (node->type) {
1465 case nir_cf_node_block:
1466 validate_block(nir_cf_node_as_block(node), state);
1467 break;
1468
1469 case nir_cf_node_if:
1470 validate_if(nir_cf_node_as_if(node), state);
1471 break;
1472
1473 case nir_cf_node_loop:
1474 validate_loop(nir_cf_node_as_loop(node), state);
1475 break;
1476
1477 default:
1478 unreachable("Invalid CF node type");
1479 }
1480 }
1481
1482 static void
1483 validate_constant(nir_constant *c, const struct glsl_type *type,
1484 validate_state *state)
1485 {
1486 if (glsl_type_is_vector_or_scalar(type)) {
1487 unsigned num_components = glsl_get_vector_elements(type);
1488 unsigned bit_size = glsl_get_bit_size(type);
1489 for (unsigned i = 0; i < num_components; i++)
1490 validate_const_value(&c->values[i], bit_size, c->is_null_constant, state);
1491 for (unsigned i = num_components; i < NIR_MAX_VEC_COMPONENTS; i++)
1492 validate_assert(state, c->values[i].u64 == 0);
1493 } else {
1494 validate_assert(state, c->num_elements == glsl_get_length(type));
1495 if (glsl_type_is_struct_or_ifc(type)) {
1496 for (unsigned i = 0; i < c->num_elements; i++) {
1497 const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
1498 validate_constant(c->elements[i], elem_type, state);
1499 validate_assert(state, !c->is_null_constant || c->elements[i]->is_null_constant);
1500 }
1501 } else if (glsl_type_is_array_or_matrix(type)) {
1502 const struct glsl_type *elem_type = glsl_get_array_element(type);
1503 for (unsigned i = 0; i < c->num_elements; i++) {
1504 validate_constant(c->elements[i], elem_type, state);
1505 validate_assert(state, !c->is_null_constant || c->elements[i]->is_null_constant);
1506 }
1507 } else {
1508 validate_assert(state, !"Invalid type for nir_constant");
1509 }
1510 }
1511 }
1512
1513 static void
1514 validate_var_decl(nir_variable *var, nir_variable_mode valid_modes,
1515 validate_state *state)
1516 {
1517 state->var = var;
1518
1519 /* Must have exactly one mode set */
1520 validate_assert(state, util_is_power_of_two_nonzero(var->data.mode));
1521 validate_assert(state, var->data.mode & valid_modes);
1522
1523 if (var->data.compact) {
1524 /* The "compact" flag is only valid on arrays of scalars. */
1525 assert(glsl_type_is_array(var->type));
1526
1527 const struct glsl_type *type = glsl_get_array_element(var->type);
1528 if (nir_is_arrayed_io(var, state->shader->info.stage)) {
1529 if (var->data.per_view) {
1530 assert(glsl_type_is_array(type));
1531 type = glsl_get_array_element(type);
1532 }
1533 assert(glsl_type_is_array(type));
1534 assert(glsl_type_is_scalar(glsl_get_array_element(type)));
1535 } else {
1536 assert(glsl_type_is_scalar(type));
1537 }
1538 }
1539
1540 if (var->num_members > 0) {
1541 const struct glsl_type *without_array = glsl_without_array(var->type);
1542 validate_assert(state, glsl_type_is_struct_or_ifc(without_array));
1543 validate_assert(state, var->num_members == glsl_get_length(without_array));
1544 validate_assert(state, var->members != NULL);
1545 }
1546
1547 if (var->data.per_view)
1548 validate_assert(state, glsl_type_is_array(var->type));
1549
1550 if (var->constant_initializer)
1551 validate_constant(var->constant_initializer, var->type, state);
1552
1553 if (var->data.mode == nir_var_image) {
1554 validate_assert(state, !var->data.bindless);
1555 validate_assert(state, glsl_type_is_image(glsl_without_array(var->type)));
1556 }
1557
1558 if (var->data.per_vertex)
1559 validate_assert(state, state->shader->info.stage == MESA_SHADER_FRAGMENT);
1560
1561 /*
1562 * TODO validate some things ir_validate.cpp does (requires more GLSL type
1563 * support)
1564 */
1565
1566 _mesa_hash_table_insert(state->var_defs, var,
1567 valid_modes == nir_var_function_temp ? state->impl : NULL);
1568
1569 state->var = NULL;
1570 }
1571
1572 static bool
1573 validate_ssa_def_dominance(nir_def *def, void *_state)
1574 {
1575 validate_state *state = _state;
1576
1577 validate_assert(state, def->index < state->impl->ssa_alloc);
1578 validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
1579 BITSET_SET(state->ssa_defs_found, def->index);
1580
1581 return true;
1582 }
1583
1584 static bool
1585 validate_src_dominance(nir_src *src, void *_state)
1586 {
1587 validate_state *state = _state;
1588
1589 if (src->ssa->parent_instr->block == nir_src_parent_instr(src)->block) {
1590 validate_assert(state, src->ssa->index < state->impl->ssa_alloc);
1591 validate_assert(state, BITSET_TEST(state->ssa_defs_found,
1592 src->ssa->index));
1593 } else {
1594 validate_assert(state, nir_block_dominates(src->ssa->parent_instr->block,
1595 nir_src_parent_instr(src)->block));
1596 }
1597 return true;
1598 }
1599
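/* Check SSA dominance for a whole function implementation: every non-phi
 * source must be dominated by its definition, while phi sources are checked
 * against the corresponding predecessor block instead.
 */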
static void
validate_ssa_dominance(nir_function_impl *impl, validate_state *state)
{
   nir_metadata_require(impl, nir_metadata_dominance);

   nir_foreach_block(block, impl) {
      state->block = block;
      nir_foreach_instr(instr, block) {
         state->instr = instr;
         if (instr->type == nir_instr_type_phi) {
            nir_phi_instr *phi = nir_instr_as_phi(instr);
            nir_foreach_phi_src(src, phi) {
               validate_assert(state,
                               nir_block_dominates(src->src.ssa->parent_instr->block,
                                                   src->pred));
            }
         } else {
            nir_foreach_src(instr, validate_src_dominance, state);
         }
         nir_foreach_def(instr, validate_ssa_def_dominance, state);
      }
   }
}

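/* Validate a whole nir_function_impl: its local variables, the control-flow
 * tree rooted at impl->body (structured or unstructured), and the end block.
 * SSA dominance is additionally checked when NIR_DEBUG(VALIDATE_SSA_DOMINANCE)
 * is set.
 */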
static void
validate_function_impl(nir_function_impl *impl, validate_state *state)
{
   validate_assert(state, impl->function->impl == impl);
   validate_assert(state, impl->cf_node.parent == NULL);

   if (impl->preamble) {
      validate_assert(state, impl->function->is_entrypoint);
      validate_assert(state, impl->preamble->is_preamble);
   }

   validate_assert(state, exec_list_is_empty(&impl->end_block->instr_list));
   validate_assert(state, impl->end_block->successors[0] == NULL);
   validate_assert(state, impl->end_block->successors[1] == NULL);

   state->impl = impl;
   state->parent_node = &impl->cf_node;

   exec_list_validate(&impl->locals);
   nir_foreach_function_temp_variable(var, impl) {
      validate_var_decl(var, nir_var_function_temp, state);
   }

   state->ssa_defs_found = reralloc(state->mem_ctx, state->ssa_defs_found,
                                    BITSET_WORD, BITSET_WORDS(impl->ssa_alloc));
   memset(state->ssa_defs_found, 0, BITSET_WORDS(impl->ssa_alloc) * sizeof(BITSET_WORD));

   _mesa_set_clear(state->blocks, NULL);
   _mesa_set_resize(state->blocks, impl->num_blocks);
   if (impl->structured)
      collect_blocks(&impl->body, state);
   else
      collect_unstructured_blocks(impl, state);
   _mesa_set_add(state->blocks, impl->end_block);
   validate_assert(state, !exec_list_is_empty(&impl->body));
   foreach_list_typed(nir_cf_node, node, node, &impl->body) {
      validate_cf_node(node, state);
   }
   validate_end_block(impl->end_block, state);

   /* We must have seen every source by now. This also means that we've untagged
    * every source, so we have valid (unaugmented) NIR once again.
    */
   validate_assert(state, state->nr_tagged_srcs == 0);

   static int validate_dominance = -1;
   if (validate_dominance < 0) {
      validate_dominance =
         NIR_DEBUG(VALIDATE_SSA_DOMINANCE);
   }
   if (validate_dominance) {
      memset(state->ssa_defs_found, 0, BITSET_WORDS(impl->ssa_alloc) * sizeof(BITSET_WORD));
      validate_ssa_dominance(impl, state);
   }
}

static void
validate_function(nir_function *func, validate_state *state)
{
   if (func->impl != NULL) {
      validate_assert(state, func->impl->function == func);
      validate_function_impl(func->impl, state);
   }
}

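/* Set up a fresh validate_state.  All allocations hang off a single ralloc
 * context so that destroy_validate_state() can free everything at once.
 */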
static void
init_validate_state(validate_state *state)
{
   state->mem_ctx = ralloc_context(NULL);
   state->ssa_defs_found = NULL;
   state->blocks = _mesa_pointer_set_create(state->mem_ctx);
   state->var_defs = _mesa_pointer_hash_table_create(state->mem_ctx);
   state->errors = _mesa_pointer_hash_table_create(state->mem_ctx);
   state->nr_tagged_srcs = 0;

   state->loop = NULL;
   state->in_loop_continue_construct = false;
   state->instr = NULL;
   state->var = NULL;
}

static void
destroy_validate_state(validate_state *state)
{
   ralloc_free(state->mem_ctx);
}

simple_mtx_t fail_dump_mutex = SIMPLE_MTX_INITIALIZER;

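/* Print the annotated shader and every recorded error to stderr, then abort.
 * Callers only invoke this after at least one error has been recorded.
 */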
static void
dump_errors(validate_state *state, const char *when)
{
   struct hash_table *errors = state->errors;

   /* Lock around dumping so that we get clean dumps in a multi-threaded
    * scenario
    */
   simple_mtx_lock(&fail_dump_mutex);

   if (when) {
      fprintf(stderr, "NIR validation failed %s\n", when);
      fprintf(stderr, "%d errors:\n", _mesa_hash_table_num_entries(errors));
   } else {
      fprintf(stderr, "NIR validation failed with %d errors:\n",
              _mesa_hash_table_num_entries(errors));
   }

   nir_print_shader_annotated(state->shader, stderr, errors);

   if (_mesa_hash_table_num_entries(errors) > 0) {
      fprintf(stderr, "%d additional errors:\n",
              _mesa_hash_table_num_entries(errors));
      hash_table_foreach(errors, entry) {
         fprintf(stderr, "%s\n", (char *)entry->data);
      }
   }

   simple_mtx_unlock(&fail_dump_mutex);

   abort();
}

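/* Entry point: validate an entire shader.  Builds the set of variable modes
 * legal for this shader stage, validates every shader-level variable and
 * every function, sanity-checks xfb_info, and aborts with an error dump if
 * anything failed.  Skipped entirely when NIR_DEBUG(NOVALIDATE) is set.
 */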
void
nir_validate_shader(nir_shader *shader, const char *when)
{
   if (NIR_DEBUG(NOVALIDATE))
      return;

   validate_state state;
   init_validate_state(&state);

   state.shader = shader;

   nir_variable_mode valid_modes =
      nir_var_shader_in |
      nir_var_shader_out |
      nir_var_shader_temp |
      nir_var_uniform |
      nir_var_mem_ubo |
      nir_var_system_value |
      nir_var_mem_ssbo |
      nir_var_mem_shared |
      nir_var_mem_global |
      nir_var_mem_push_const |
      nir_var_mem_constant |
      nir_var_image;

   if (gl_shader_stage_is_callable(shader->info.stage))
      valid_modes |= nir_var_shader_call_data;

   if (shader->info.stage == MESA_SHADER_ANY_HIT ||
       shader->info.stage == MESA_SHADER_CLOSEST_HIT ||
       shader->info.stage == MESA_SHADER_INTERSECTION)
      valid_modes |= nir_var_ray_hit_attrib;

   if (shader->info.stage == MESA_SHADER_TASK ||
       shader->info.stage == MESA_SHADER_MESH)
      valid_modes |= nir_var_mem_task_payload;

   if (shader->info.stage == MESA_SHADER_COMPUTE)
      valid_modes |= nir_var_mem_node_payload |
                     nir_var_mem_node_payload_in;

   exec_list_validate(&shader->variables);
   nir_foreach_variable_in_shader(var, shader)
      validate_var_decl(var, valid_modes, &state);

   exec_list_validate(&shader->functions);
   foreach_list_typed(nir_function, func, node, &shader->functions) {
      validate_function(func, &state);
   }

   if (shader->xfb_info != NULL) {
      /* At least validate that, if nir_shader::xfb_info exists, the shader
       * has real transform feedback going on.
       */
      validate_assert(&state, shader->info.stage == MESA_SHADER_VERTEX ||
                                 shader->info.stage == MESA_SHADER_TESS_EVAL ||
                                 shader->info.stage == MESA_SHADER_GEOMETRY);
      validate_assert(&state, shader->xfb_info->buffers_written != 0);
      validate_assert(&state, shader->xfb_info->streams_written != 0);
      validate_assert(&state, shader->xfb_info->output_count > 0);
   }

   if (_mesa_hash_table_num_entries(state.errors) > 0)
      dump_errors(&state, when);

   destroy_validate_state(&state);
}

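/* Like nir_validate_shader(), but only checks SSA dominance for each function
 * implementation.
 */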
void
nir_validate_ssa_dominance(nir_shader *shader, const char *when)
{
   if (NIR_DEBUG(NOVALIDATE))
      return;

   validate_state state;
   init_validate_state(&state);

   state.shader = shader;

   nir_foreach_function_impl(impl, shader) {
      state.ssa_defs_found = reralloc(state.mem_ctx, state.ssa_defs_found,
                                      BITSET_WORD,
                                      BITSET_WORDS(impl->ssa_alloc));
      memset(state.ssa_defs_found, 0, BITSET_WORDS(impl->ssa_alloc) * sizeof(BITSET_WORD));

      state.impl = impl;
      validate_ssa_dominance(impl, &state);
   }

   if (_mesa_hash_table_num_entries(state.errors) > 0)
      dump_errors(&state, when);

   destroy_validate_state(&state);
}

#endif /* NDEBUG */