/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef ELK_NIR_H
#define ELK_NIR_H

#include "compiler/nir/nir.h"
#include "elk_compiler.h"
#include "nir_builder.h"

#ifdef __cplusplus
extern "C" {
#endif

int elk_type_size_vec4(const struct glsl_type *type, bool bindless);
int elk_type_size_dvec4(const struct glsl_type *type, bool bindless);

static inline int
elk_type_size_scalar_bytes(const struct glsl_type *type, bool bindless)
{
   return glsl_count_dword_slots(type, bindless) * 4;
}

static inline int
elk_type_size_vec4_bytes(const struct glsl_type *type, bool bindless)
{
   return elk_type_size_vec4(type, bindless) * 16;
}
/* Flags set in the instr->pass_flags field by i965 analysis passes */
enum {
   ELK_NIR_NON_BOOLEAN = 0x0,

   /* Indicates that the given instruction's destination is a boolean
    * value but that it needs to be resolved before it can be used.
    * On Gen <= 5, CMP instructions return a 32-bit value where the bottom
    * bit represents the actual true/false value of the compare and the top
    * 31 bits are undefined. In order to use this value, we have to do a
    * "resolve" operation by replacing the value of the CMP with -(x & 1)
    * to sign-extend the bottom bit to 0/~0.
    */
   ELK_NIR_BOOLEAN_NEEDS_RESOLVE = 0x1,

   /* Indicates that the given instruction's destination is a boolean
    * value that has intentionally been left unresolved. Not all boolean
    * values need to be resolved immediately. For instance, if we have
    *
    *    CMP r1 r2 r3
    *    CMP r4 r5 r6
    *    AND r7 r1 r4
    *
    * We don't have to resolve the result of the two CMP instructions
    * immediately because the AND still does an AND of the bottom bits.
    * Instead, we can save ourselves instructions by delaying the resolve
    * until after the AND. The result of the two CMP instructions is left
    * as ELK_NIR_BOOLEAN_UNRESOLVED.
    */
   ELK_NIR_BOOLEAN_UNRESOLVED = 0x2,
   /* Indicates that the given instruction's destination is a boolean
    * value that does not need a resolve. For instance, if you AND two
    * values that are ELK_NIR_BOOLEAN_NEEDS_RESOLVE then we know that both
    * values will be 0/~0 before we get them (each is resolved before use)
    * and the result of the AND is also guaranteed to be 0/~0 and does not
    * need a resolve.
    */
   ELK_NIR_BOOLEAN_NO_RESOLVE = 0x3,

   /* A mask for extracting the boolean status values from instr->pass_flags */
   ELK_NIR_BOOLEAN_MASK = 0x3,
};

void elk_nir_analyze_boolean_resolves(nir_shader *nir);
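
/* Example (an illustrative sketch, not part of this header): a backend
 * consuming the analysis above would mask off the boolean status and only
 * emit the -(x & 1) resolve when the value actually requires one:
 *
 *    unsigned status = instr->pass_flags & ELK_NIR_BOOLEAN_MASK;
 *    if (status == ELK_NIR_BOOLEAN_NEEDS_RESOLVE)
 *       emit_resolve(instr);
 *
 * where emit_resolve() is a hypothetical helper that emits -(x & 1).
 */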

struct elk_nir_compiler_opts {
   /* Soft floating point implementation shader */
   const nir_shader *softfp64;

   /* Whether robust image access is enabled */
   bool robust_image_access;

   /* Input vertices for TCS stage (0 means dynamic) */
   unsigned input_vertices;
};

/* UBO surface index can come in 2 flavors:
 * - nir_intrinsic_resource_intel
 * - anything else
 *
 * In the first case, checking that the surface index is const requires
 * checking resource_intel::src[1]. In any other case it's a simple
 * nir_src_is_const().
 *
 * This function should only be called on src[0] of load_ubo intrinsics.
 */
static inline bool
elk_nir_ubo_surface_index_is_pushable(nir_src src)
{
   nir_intrinsic_instr *intrin =
      src.ssa->parent_instr->type == nir_instr_type_intrinsic ?
      nir_instr_as_intrinsic(src.ssa->parent_instr) : NULL;

   if (intrin && intrin->intrinsic == nir_intrinsic_resource_intel) {
      return (nir_intrinsic_resource_access_intel(intrin) &
              nir_resource_intel_pushable);
   }

   return nir_src_is_const(src);
}
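
/* Illustrative sketch (not part of this header) of how the helpers above and
 * below cooperate when gathering push-constant candidates from load_ubo
 * intrinsics:
 *
 *    if (elk_nir_ubo_surface_index_is_pushable(load_ubo->src[0])) {
 *       unsigned block =
 *          elk_nir_ubo_surface_index_get_push_block(load_ubo->src[0]);
 *       ... record a push range for this block ...
 *    }
 */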

static inline unsigned
elk_nir_ubo_surface_index_get_push_block(nir_src src)
{
   if (nir_src_is_const(src))
      return nir_src_as_uint(src);

   if (!elk_nir_ubo_surface_index_is_pushable(src))
      return UINT32_MAX;

   assert(src.ssa->parent_instr->type == nir_instr_type_intrinsic);

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(src.ssa->parent_instr);
   assert(intrin->intrinsic == nir_intrinsic_resource_intel);

   return nir_intrinsic_resource_block_intel(intrin);
}

/* This helper returns the binding table index of a surface access (any
 * buffer/image/etc...). It works off the source of one of the intrinsics
 * (load_ubo, load_ssbo, store_ssbo, load_image, store_image, etc...).
 *
 * If the source is constant, then this is the binding table index. If we're
 * going through a resource_intel intrinsic, then we need to check src[1] of
 * that intrinsic.
 */
static inline unsigned
elk_nir_ubo_surface_index_get_bti(nir_src src)
{
   if (nir_src_is_const(src))
      return nir_src_as_uint(src);

   assert(src.ssa->parent_instr->type == nir_instr_type_intrinsic);

   nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(src.ssa->parent_instr);
   if (!intrin || intrin->intrinsic != nir_intrinsic_resource_intel)
      return UINT32_MAX;

   /* In practice we could even drop this intrinsic because bindless accesses
    * always operate from a base offset coming from a push constant, so they
    * can never be constant.
    */
   if (nir_intrinsic_resource_access_intel(intrin) &
       nir_resource_intel_bindless)
      return UINT32_MAX;

   if (!nir_src_is_const(intrin->src[1]))
      return UINT32_MAX;

   return nir_src_as_uint(intrin->src[1]);
}
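
/* For example (illustrative only), a binding table setup pass walking UBO
 * loads could query the fixed BTI, if any, with
 *
 *    unsigned bti = elk_nir_ubo_surface_index_get_bti(load_ubo->src[0]);
 *    if (bti == UINT32_MAX) {
 *       ... dynamic or bindless surface, no fixed binding table entry ...
 *    }
 */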

void elk_preprocess_nir(const struct elk_compiler *compiler,
                        nir_shader *nir,
                        const struct elk_nir_compiler_opts *opts);

void
elk_nir_link_shaders(const struct elk_compiler *compiler,
                     nir_shader *producer, nir_shader *consumer);

bool elk_nir_lower_cs_intrinsics(nir_shader *nir,
                                 const struct intel_device_info *devinfo,
                                 struct elk_cs_prog_data *prog_data);
bool elk_nir_lower_alpha_to_coverage(nir_shader *shader,
                                     const struct elk_wm_prog_key *key,
                                     const struct elk_wm_prog_data *prog_data);
void elk_nir_lower_vs_inputs(nir_shader *nir,
                             bool edgeflag_is_last,
                             const uint8_t *vs_attrib_wa_flags);
void elk_nir_lower_vue_inputs(nir_shader *nir,
                              const struct intel_vue_map *vue_map);
void elk_nir_lower_tes_inputs(nir_shader *nir, const struct intel_vue_map *vue);
void elk_nir_lower_fs_inputs(nir_shader *nir,
                             const struct intel_device_info *devinfo,
                             const struct elk_wm_prog_key *key);
void elk_nir_lower_vue_outputs(nir_shader *nir);
void elk_nir_lower_tcs_outputs(nir_shader *nir, const struct intel_vue_map *vue,
                               enum tess_primitive_mode tes_primitive_mode);
void elk_nir_lower_fs_outputs(nir_shader *nir);

bool elk_nir_lower_cmat(nir_shader *nir, unsigned subgroup_size);

bool elk_nir_lower_shading_rate_output(nir_shader *nir);

bool elk_nir_lower_sparse_intrinsics(nir_shader *nir);

struct elk_nir_lower_storage_image_opts {
   const struct intel_device_info *devinfo;

   bool lower_loads;
   bool lower_stores;
   bool lower_atomics;
   bool lower_get_size;
};

bool elk_nir_lower_storage_image(nir_shader *nir,
                                 const struct elk_nir_lower_storage_image_opts *opts);
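
/* Illustrative use of the options above (a sketch; which operations get
 * lowered depends on the caller's needs):
 *
 *    const struct elk_nir_lower_storage_image_opts img_opts = {
 *       .devinfo = devinfo,
 *       .lower_loads = true,
 *       .lower_stores = true,
 *    };
 *    elk_nir_lower_storage_image(nir, &img_opts);
 */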

bool elk_nir_lower_mem_access_bit_sizes(nir_shader *shader,
                                        const struct intel_device_info *devinfo);

void elk_postprocess_nir(nir_shader *nir,
                         const struct elk_compiler *compiler,
                         bool debug_enabled,
                         enum elk_robustness_flags robust_flags);

bool elk_nir_apply_attribute_workarounds(nir_shader *nir,
                                         const uint8_t *attrib_wa_flags);

bool elk_nir_apply_trig_workarounds(nir_shader *nir);

bool elk_nir_limit_trig_input_range_workaround(nir_shader *nir);

void elk_nir_apply_key(nir_shader *nir,
                       const struct elk_compiler *compiler,
                       const struct elk_base_prog_key *key,
                       unsigned max_subgroup_size);

unsigned elk_nir_api_subgroup_size(const nir_shader *nir,
                                   unsigned hw_subgroup_size);

bool elk_nir_should_vectorize_mem(unsigned align_mul, unsigned align_offset,
                                  unsigned bit_size,
                                  unsigned num_components,
                                  nir_intrinsic_instr *low,
                                  nir_intrinsic_instr *high,
                                  void *data);

void elk_nir_analyze_ubo_ranges(const struct elk_compiler *compiler,
                                nir_shader *nir,
                                struct elk_ubo_range out_ranges[4]);

void elk_nir_optimize(nir_shader *nir, bool is_scalar,
                      const struct intel_device_info *devinfo);

nir_shader *elk_nir_create_passthrough_tcs(void *mem_ctx,
                                           const struct elk_compiler *compiler,
                                           const struct elk_tcs_prog_key *key);

#define ELK_NIR_FRAG_OUTPUT_INDEX_SHIFT 0
#define ELK_NIR_FRAG_OUTPUT_INDEX_MASK INTEL_MASK(0, 0)
#define ELK_NIR_FRAG_OUTPUT_LOCATION_SHIFT 1
#define ELK_NIR_FRAG_OUTPUT_LOCATION_MASK INTEL_MASK(31, 1)
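
/* These fields pack a fragment output's location and index into one word:
 * bit 0 carries the output index and bits 31:1 carry the location. An
 * illustrative packing would be
 *
 *    unsigned key = (location << ELK_NIR_FRAG_OUTPUT_LOCATION_SHIFT) |
 *                   (index << ELK_NIR_FRAG_OUTPUT_INDEX_SHIFT);
 */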

bool elk_nir_move_interpolation_to_top(nir_shader *nir);
nir_def *elk_nir_load_global_const(nir_builder *b,
                                   nir_intrinsic_instr *load_uniform,
                                   nir_def *base_addr,
                                   unsigned off);

const struct glsl_type *elk_nir_get_var_type(const struct nir_shader *nir,
                                             nir_variable *var);

void elk_nir_adjust_payload(nir_shader *shader);

#ifdef __cplusplus
}
#endif

#endif /* ELK_NIR_H */