xref: /aosp_15_r20/external/mesa3d/src/compiler/nir/nir_lower_robust_access.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
/*
 * Copyright 2023 Valve Corporation
 * Copyright 2020 Raspberry Pi Ltd
 * SPDX-License-Identifier: MIT
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_intrinsics_indices.h"

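/*
 * Lowers buffer, shared-memory, and image accesses so that out-of-bounds
 * accesses behave robustly (e.g. as required by Vulkan robustBufferAccess /
 * robustImageAccess): buffer and shared offsets are rewritten to zero when
 * the access would exceed the resource size, and image accesses are
 * predicated on their coordinates being in bounds.
 */
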
static void
rewrite_offset(nir_builder *b, nir_intrinsic_instr *instr,
               uint32_t type_sz, uint32_t offset_src, nir_def *size)
{
   /* Compute the maximum offset being accessed and, if it is out of
    * bounds, rewrite the offset to 0 to ensure the access stays within
    * bounds.
    */
   const uint32_t access_size = instr->num_components * type_sz;
   nir_def *max_access_offset =
      nir_iadd_imm(b, instr->src[offset_src].ssa, access_size - 1);
   nir_def *offset =
      nir_bcsel(b, nir_uge(b, max_access_offset, size), nir_imm_int(b, 0),
                instr->src[offset_src].ssa);

   /* Rewrite offset */
   nir_src_rewrite(&instr->src[offset_src], offset);
}

/*
 * Wrap an intrinsic in an if, predicated on a "valid" condition. If the
 * intrinsic produces a destination, it will be zero in the invalid case.
 */
static void
wrap_in_if(nir_builder *b, nir_intrinsic_instr *instr, nir_def *valid)
{
   bool has_dest = nir_intrinsic_infos[instr->intrinsic].has_dest;
   nir_def *res, *zero;

   if (has_dest) {
      zero = nir_imm_zero(b, instr->def.num_components,
                          instr->def.bit_size);
   }

   nir_push_if(b, valid);
   {
      nir_instr *orig = nir_instr_clone(b->shader, &instr->instr);
      nir_builder_instr_insert(b, orig);

      if (has_dest)
         res = &nir_instr_as_intrinsic(orig)->def;
   }
   nir_pop_if(b, NULL);

   if (has_dest)
      nir_def_rewrite_uses(&instr->def, nir_if_phi(b, res, zero));

   /* We've cloned and wrapped, so drop original instruction */
   nir_instr_remove(&instr->instr);
}

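/* Bounds-check a UBO or SSBO load: src[0] is the buffer index and src[1]
 * the byte offset, which rewrite_offset clamps against the buffer size.
 */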
static void
lower_buffer_load(nir_builder *b,
                  nir_intrinsic_instr *instr,
                  const nir_lower_robust_access_options *opts)
{
   uint32_t type_sz = instr->def.bit_size / 8;
   nir_def *size;
   nir_def *index = instr->src[0].ssa;

   if (instr->intrinsic == nir_intrinsic_load_ubo) {
      size = nir_get_ubo_size(b, 32, index);
   } else {
      size = nir_get_ssbo_size(b, index);
   }

   rewrite_offset(b, instr, type_sz, 1, size);
}

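/* Bounds-check an SSBO store: src[0] is the value to store, src[1] the
 * buffer index, and src[2] the byte offset.
 */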
static void
lower_buffer_store(nir_builder *b, nir_intrinsic_instr *instr)
{
   uint32_t type_sz = nir_src_bit_size(instr->src[0]) / 8;
   rewrite_offset(b, instr, type_sz, 2,
                  nir_get_ssbo_size(b, instr->src[1].ssa));
}

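/* Bounds-check an SSBO atomic: src[0] is the buffer index and src[1] the
 * byte offset; a 4-byte (32-bit) access size is assumed.
 */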
static void
lower_buffer_atomic(nir_builder *b, nir_intrinsic_instr *instr)
{
   rewrite_offset(b, instr, 4, 1, nir_get_ssbo_size(b, instr->src[0].ssa));
}

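/* Bounds-check a shared-memory load, store, or atomic against the total
 * shared_size recorded in the shader info.
 */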
static void
lower_buffer_shared(nir_builder *b, nir_intrinsic_instr *instr)
{
   uint32_t type_sz, offset_src;
   if (instr->intrinsic == nir_intrinsic_load_shared) {
      offset_src = 0;
      type_sz = instr->def.bit_size / 8;
   } else if (instr->intrinsic == nir_intrinsic_store_shared) {
      offset_src = 1;
      type_sz = nir_src_bit_size(instr->src[0]) / 8;
   } else {
      /* atomic */
      offset_src = 0;
      type_sz = 4;
   }

   rewrite_offset(b, instr, type_sz, offset_src,
                  nir_imm_int(b, b->shader->info.shared_size));
}

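/* Predicate an image access on its coordinates (and, for multisampled
 * images, its sample index) being smaller than the image size. Returns
 * true if the intrinsic was lowered.
 */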
static bool
lower_image(nir_builder *b,
            nir_intrinsic_instr *instr,
            const nir_lower_robust_access_options *opts, bool deref)
{
   enum glsl_sampler_dim dim = nir_intrinsic_image_dim(instr);
   bool atomic = (instr->intrinsic == nir_intrinsic_image_atomic ||
                  instr->intrinsic == nir_intrinsic_image_atomic_swap ||
                  instr->intrinsic == nir_intrinsic_image_deref_atomic ||
                  instr->intrinsic == nir_intrinsic_image_deref_atomic_swap);
   if (!opts->lower_image &&
       !(opts->lower_buffer_image && dim == GLSL_SAMPLER_DIM_BUF) &&
       !(opts->lower_image_atomic && atomic))
      return false;

   uint32_t num_coords = nir_image_intrinsic_coord_components(instr);
   bool is_array = nir_intrinsic_image_array(instr);
   nir_def *coord = instr->src[1].ssa;

   /* Get image size. imageSize for cubes returns the size of a single face. */
   unsigned size_components = num_coords;
   if (dim == GLSL_SAMPLER_DIM_CUBE && !is_array)
      size_components -= 1;

   nir_def *size = nir_image_size(b, size_components, 32,
                                  instr->src[0].ssa, nir_imm_int(b, 0),
                                  .image_array = is_array, .image_dim = dim);
   if (deref) {
      nir_instr_as_intrinsic(size->parent_instr)->intrinsic =
         nir_intrinsic_image_deref_size;
   }

   if (dim == GLSL_SAMPLER_DIM_CUBE) {
      nir_def *z = is_array ? nir_imul_imm(b, nir_channel(b, size, 2), 6)
                            : nir_imm_int(b, 6);

      size = nir_vec3(b, nir_channel(b, size, 0), nir_channel(b, size, 1), z);
   }

   nir_def *in_bounds = nir_ball(b, nir_ult(b, coord, size));

   if (dim == GLSL_SAMPLER_DIM_MS) {
      nir_def *sample = instr->src[2].ssa;
      nir_def *samples = nir_image_samples(b, 32, instr->src[0].ssa,
                                           .image_array = is_array, .image_dim = dim);
      if (deref) {
         nir_instr_as_intrinsic(samples->parent_instr)->intrinsic =
            nir_intrinsic_image_deref_samples;
      }

      in_bounds = nir_iand(b, in_bounds, nir_ult(b, sample, samples));
   }

   /* Only execute if coordinates are in-bounds. Otherwise, return zero. */
   wrap_in_if(b, instr, in_bounds);
   return true;
}

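/* Per-instruction callback: dispatch each buffer, shared-memory, or image
 * intrinsic to the matching lowering, according to the enabled options.
 */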
static bool
lower(nir_builder *b, nir_instr *instr, void *_opts)
{
   const nir_lower_robust_access_options *opts = _opts;
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
   b->cursor = nir_before_instr(instr);

   switch (intr->intrinsic) {
   case nir_intrinsic_image_load:
   case nir_intrinsic_image_store:
   case nir_intrinsic_image_atomic:
   case nir_intrinsic_image_atomic_swap:
      return lower_image(b, intr, opts, false);

   case nir_intrinsic_image_deref_load:
   case nir_intrinsic_image_deref_store:
   case nir_intrinsic_image_deref_atomic:
   case nir_intrinsic_image_deref_atomic_swap:
      return lower_image(b, intr, opts, true);

   case nir_intrinsic_load_ubo:
      if (opts->lower_ubo) {
         lower_buffer_load(b, intr, opts);
         return true;
      }
      return false;

   case nir_intrinsic_load_ssbo:
      if (opts->lower_ssbo) {
         lower_buffer_load(b, intr, opts);
         return true;
      }
      return false;

   case nir_intrinsic_store_ssbo:
      if (opts->lower_ssbo) {
         lower_buffer_store(b, intr);
         return true;
      }
      return false;

   case nir_intrinsic_ssbo_atomic:
      if (opts->lower_ssbo) {
         lower_buffer_atomic(b, intr);
         return true;
      }
      return false;

   case nir_intrinsic_store_shared:
   case nir_intrinsic_load_shared:
   case nir_intrinsic_shared_atomic:
   case nir_intrinsic_shared_atomic_swap:
      if (opts->lower_shared) {
         lower_buffer_shared(b, intr);
         return true;
      }
      return false;

   default:
      return false;
   }
}

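/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * lowering only SSBO and image accesses would look like
 *
 *    const nir_lower_robust_access_options opts = {
 *       .lower_ssbo = true,
 *       .lower_image = true,
 *    };
 *    NIR_PASS(progress, nir, nir_lower_robust_access, &opts);
 */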
bool
nir_lower_robust_access(nir_shader *s,
                        const nir_lower_robust_access_options *opts)
{
   return nir_shader_instructions_pass(s, lower, nir_metadata_control_flow,
                                       (void *)opts);
}