/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "nir/nir_builder.h"
#include "util/u_debug.h"

/**
 * This file implements the lowering required for VK_KHR_multiview.
 *
 * When possible, Primitive Replication is used and the shader is modified to
 * make gl_Position an array and fill it with values for each view.
 *
 * Otherwise we implement multiview using instanced rendering. The number of
 * instances in each draw call is multiplied by the number of views in the
 * subpass. Then, in the shader, we divide gl_InstanceId by the number of
 * views and use gl_InstanceId % view_count to compute the actual ViewIndex.
 */
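
/* For example (illustrative numbers only): with a two-view subpass, a draw
 * of 3 instances is emitted as 6 hardware instances.  The lowered vertex
 * shader then recovers the application-visible instance id as
 * gl_InstanceId / 2 and derives the view index from gl_InstanceId % 2,
 * remapping through the view_mask when the mask is sparse.
 */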

struct lower_multiview_state {
   nir_builder builder;

   uint32_t view_mask;

   /* The as-loaded gl_InstanceId, which counts over
    * instance_count * view_count hardware instances.
    */
   nir_def *instance_id_with_views;

   /* The application-visible instance id, i.e. instance_id_with_views
    * divided by the number of views.
    */
   nir_def *instance_id;

   /* The view index computed for this invocation. */
   nir_def *view_index;
};

static nir_def *
build_instance_id(struct lower_multiview_state *state)
{
   assert(state->builder.shader->info.stage == MESA_SHADER_VERTEX);

   if (state->instance_id == NULL) {
      nir_builder *b = &state->builder;

      b->cursor =
         nir_after_instr(state->instance_id_with_views->parent_instr);

      /* We use instancing for implementing multiview. The actual instance id
       * is given by dividing instance_id by the number of views in this
       * subpass.
       */
      state->instance_id =
         nir_idiv(b, state->instance_id_with_views,
                  nir_imm_int(b, util_bitcount(state->view_mask)));
   }

   return state->instance_id;
}

static nir_def *
build_view_index(struct lower_multiview_state *state)
{
   assert(state->builder.shader->info.stage != MESA_SHADER_FRAGMENT);

   if (state->view_index == NULL) {
      nir_builder *b = &state->builder;

      b->cursor =
         nir_after_instr(state->instance_id_with_views->parent_instr);

      assert(state->view_mask != 0);
      if (util_bitcount(state->view_mask) == 1) {
         /* Set the view index directly. */
         state->view_index = nir_imm_int(b, ffs(state->view_mask) - 1);
      } else if (state->builder.shader->info.stage == MESA_SHADER_VERTEX) {
         /* We only support 16 views */
         assert((state->view_mask & 0xffff0000) == 0);

         /* We use instancing for implementing multiview. The compacted view
          * id is given by instance_id % view_count. We then have to convert
          * that to an actual view id.
          */
         nir_def *compacted =
            nir_umod_imm(b, state->instance_id_with_views,
                         util_bitcount(state->view_mask));

         if (util_is_power_of_two_or_zero(state->view_mask + 1)) {
            /* If we have a full view mask, then compacted is what we want */
            state->view_index = compacted;
         } else {
            /* Now we define a map from compacted view index to the actual
             * view index that's based on the view_mask. The map is given by
             * 16 nibbles, each of which is a value from 0 to 15.
             */
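            /* For example (illustrative values only): with view_mask = 0b1010
             * the set bits are 1 and 3, so remap = 0x31.  A compacted index of
             * 0 selects nibble 0 and yields view 1, while a compacted index of
             * 1 selects nibble 1 and yields view 3.
             */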
            uint64_t remap = 0;
            uint32_t i = 0;
            u_foreach_bit(bit, state->view_mask) {
               assert(bit < 16);
               remap |= (uint64_t)bit << (i++ * 4);
            }

            nir_def *shift = nir_imul_imm(b, compacted, 4);

            /* One of these days, when we have int64 everywhere, this will be
             * easier.
             */
            nir_def *shifted;
            if (remap <= UINT32_MAX) {
               shifted = nir_ushr(b, nir_imm_int(b, remap), shift);
            } else {
               nir_def *shifted_low =
                  nir_ushr(b, nir_imm_int(b, remap), shift);
               nir_def *shifted_high =
                  nir_ushr(b, nir_imm_int(b, remap >> 32),
                           nir_iadd_imm(b, shift, -32));
               shifted = nir_bcsel(b, nir_ilt_imm(b, shift, 32),
                                   shifted_low, shifted_high);
            }
            state->view_index = nir_iand_imm(b, shifted, 0xf);
         }
      } else {
         /* In stages other than the vertex shader, read the view index that
          * the previous stage wrote to VARYING_SLOT_VIEW_INDEX (a per-vertex
          * array input in the TCS and GS).
          */
         const struct glsl_type *type = glsl_int_type();
         if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
             b->shader->info.stage == MESA_SHADER_GEOMETRY)
            type = glsl_array_type(type, 1, 0);

         nir_variable *idx_var =
            nir_variable_create(b->shader, nir_var_shader_in,
                                type, "view index");
         idx_var->data.location = VARYING_SLOT_VIEW_INDEX;
         if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
            idx_var->data.interpolation = INTERP_MODE_FLAT;

         nir_deref_instr *deref = nir_build_deref_var(b, idx_var);
         if (glsl_type_is_array(type))
            deref = nir_build_deref_array_imm(b, deref, 0);

         state->view_index = nir_load_deref(b, deref);
      }
   }

   return state->view_index;
}

static bool
is_load_view_index(const nir_instr *instr, const void *data)
{
   return instr->type == nir_instr_type_intrinsic &&
          nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_view_index;
}

static nir_def *
replace_load_view_index_with_zero(struct nir_builder *b,
                                  nir_instr *instr, void *data)
{
   assert(is_load_view_index(instr, data));
   return nir_imm_zero(b, 1, 32);
}

static nir_def *
replace_load_view_index_with_layer_id(struct nir_builder *b,
                                      nir_instr *instr, void *data)
{
   assert(is_load_view_index(instr, data));
   return nir_load_layer_id(b);
}

bool
anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask,
                        bool use_primitive_replication)
{
   assert(shader->info.stage != MESA_SHADER_COMPUTE);

   /* If multiview isn't enabled, just lower the ViewIndex builtin to zero. */
   if (view_mask == 0) {
      return nir_shader_lower_instructions(shader, is_load_view_index,
                                           replace_load_view_index_with_zero, NULL);
   }

   if (shader->info.stage == MESA_SHADER_FRAGMENT) {
      return nir_shader_lower_instructions(shader, is_load_view_index,
                                           replace_load_view_index_with_layer_id, NULL);
   }

   /* This pass assumes a single entrypoint */
   nir_function_impl *entrypoint = nir_shader_get_entrypoint(shader);

   /* Primitive Replication allows a shader to write different positions for
    * each view in the same execution. If only the position depends on the
    * view, then it is possible to use the feature instead of instancing to
    * implement multiview.
    */
   if (use_primitive_replication) {
      bool progress = nir_lower_multiview(shader, view_mask);

      if (progress) {
         nir_builder b = nir_builder_at(nir_before_impl(entrypoint));

         /* Fill Layer ID with zero. Replication will use that as the base to
          * which it applies the Render Target Array Index (RTAI) offsets.
          */
         nir_variable *layer_id_out =
            nir_variable_create(shader, nir_var_shader_out,
                                glsl_int_type(), "layer ID");
         layer_id_out->data.location = VARYING_SLOT_LAYER;
         nir_store_var(&b, layer_id_out, nir_imm_zero(&b, 1, 32), 0x1);
      }

      return progress;
   }

   struct lower_multiview_state state = {
      .view_mask = view_mask,
   };

   state.builder = nir_builder_at(nir_before_impl(entrypoint));
   nir_builder *b = &state.builder;

   /* Save the original "instance ID" which is the actual instance ID
    * multiplied by the number of views.
    */
   state.instance_id_with_views = nir_load_instance_id(b);

   /* The view index is available in all stages but the instance id is only
    * available in the VS. If it's not a fragment shader, we need to pass
    * the view index on to the next stage.
    */
   nir_def *view_index = build_view_index(&state);

   assert(view_index->parent_instr->block == nir_start_block(entrypoint));
   b->cursor = nir_after_instr(view_index->parent_instr);

   /* Unless there is only one possible view index (that would be set
    * directly), pass it to the next stage.
    */
   nir_variable *view_index_out = NULL;
   if (util_bitcount(state.view_mask) != 1) {
      view_index_out = nir_variable_create(shader, nir_var_shader_out,
                                           glsl_int_type(), "view index");
      view_index_out->data.location = VARYING_SLOT_VIEW_INDEX;
   }

   nir_variable *layer_id_out =
      nir_variable_create(shader, nir_var_shader_out,
                          glsl_int_type(), "layer ID");
   layer_id_out->data.location = VARYING_SLOT_LAYER;

   if (shader->info.stage != MESA_SHADER_GEOMETRY) {
      if (view_index_out)
         nir_store_var(b, view_index_out, view_index, 0x1);

      nir_store_var(b, layer_id_out, view_index, 0x1);
   }

   nir_foreach_block(block, entrypoint) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);

         switch (load->intrinsic) {
         case nir_intrinsic_load_instance_id:
            if (&load->def != state.instance_id_with_views) {
               nir_def_replace(&load->def, build_instance_id(&state));
            }
            break;
         case nir_intrinsic_load_view_index:
            nir_def_replace(&load->def, view_index);
            break;
         case nir_intrinsic_emit_vertex_with_counter:
            /* In geometry shaders, outputs become undefined after every
             * EmitVertex() call. We need to re-emit them for each vertex.
             */
            b->cursor = nir_before_instr(instr);
            if (view_index_out)
               nir_store_var(b, view_index_out, view_index, 0x1);

            nir_store_var(b, layer_id_out, view_index, 0x1);
            break;
         default:
            break;
         }
      }
   }

   nir_metadata_preserve(entrypoint, nir_metadata_control_flow);

   return true;
}

bool
anv_check_for_primitive_replication(struct anv_device *device,
                                    VkShaderStageFlags stages,
                                    nir_shader **shaders,
                                    uint32_t view_mask)
{
   assert(device->info->ver >= 12);

   static int primitive_replication_max_views = -1;
   if (primitive_replication_max_views < 0) {
      /* TODO: Figure out why we are not seeing the same benefits with more
       * than 2 views. For now use Primitive Replication just for the 2-view
       * case by default.
       */
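
      /* The default can be raised for experimentation, e.g. by setting
       * ANV_PRIMITIVE_REPLICATION_MAX_VIEWS=4 in the environment; the value
       * is still capped by MAX_VIEWS_FOR_PRIMITIVE_REPLICATION below.
       */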
      const unsigned default_max_views = 2;

      primitive_replication_max_views =
         MIN2(MAX_VIEWS_FOR_PRIMITIVE_REPLICATION,
              debug_get_num_option("ANV_PRIMITIVE_REPLICATION_MAX_VIEWS",
                                   default_max_views));
   }

   /* TODO: We should be able to support replication at 'geometry' stages
    * later than Vertex. In that case only the last stage can refer to
    * gl_ViewIndex.
    */
   if (stages & ~(VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))
      return false;

   /* It's possible we have no vertex shader yet (with pipeline libraries) */
   if (!(stages & VK_SHADER_STAGE_VERTEX_BIT))
      return false;

   int view_count = util_bitcount(view_mask);
   if (view_count == 1 || view_count > primitive_replication_max_views)
      return false;

   return nir_can_lower_multiview(shaders[MESA_SHADER_VERTEX]);
}