/*
 * Copyright © 2020 Valve Corporation
 *
 * SPDX-License-Identifier: MIT
 */
#include "ac_descriptors.h"
#include "ac_shader_util.h"
#include "nir.h"
#include "nir_builder.h"
#include "radv_descriptor_set.h"
#include "radv_device.h"
#include "radv_nir.h"
#include "radv_physical_device.h"
#include "radv_shader.h"
#include "radv_shader_args.h"
#include "sid.h"

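/* State shared by the lowering callbacks: the target GPU generation, the high
 * 32 bits of the descriptor address space, hardware workaround flags, and the
 * shader's argument/descriptor layout information.
 */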
typedef struct {
   enum amd_gfx_level gfx_level;
   uint32_t address32_hi;
   bool disable_aniso_single_level;
   bool has_image_load_dcc_bug;
   bool disable_tg4_trunc_coord;

   const struct radv_shader_args *args;
   const struct radv_shader_info *info;
   const struct radv_shader_layout *layout;
} apply_layout_state;

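/* Loads a scalar shader argument (user SGPR) that the shader ABI reserved. */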
static nir_def *
get_scalar_arg(nir_builder *b, unsigned size, struct ac_arg arg)
{
   assert(arg.used);
   return nir_load_scalar_arg_amd(b, size, .base = arg.arg_index);
}

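/* Extends a 32-bit descriptor offset/pointer to a 64-bit address by packing in
 * the known high 32 bits of the descriptor address space.
 */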
static nir_def *
convert_pointer_to_64_bit(nir_builder *b, apply_layout_state *state, nir_def *ptr)
{
   return nir_pack_64_2x32_split(b, ptr, nir_imm_int(b, state->address32_hi));
}

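/* Returns the 32-bit base pointer of a descriptor set, either loaded from the
 * indirect descriptor-set table or taken directly from a user SGPR.
 */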
static nir_def *
load_desc_ptr(nir_builder *b, apply_layout_state *state, unsigned set)
{
   const struct radv_userdata_locations *user_sgprs_locs = &state->info->user_sgprs_locs;
   if (user_sgprs_locs->shader_data[AC_UD_INDIRECT_DESCRIPTOR_SETS].sgpr_idx != -1) {
      nir_def *addr = get_scalar_arg(b, 1, state->args->descriptor_sets[0]);
      addr = convert_pointer_to_64_bit(b, state, addr);
      return nir_load_smem_amd(b, 1, addr, nir_imm_int(b, set * 4));
   }

   assert(state->args->descriptor_sets[set].used);
   return get_scalar_arg(b, 1, state->args->descriptor_sets[set]);
}

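/* Lowers vulkan_resource_index to a (set_ptr, offset, stride) vec3, or to a
 * packed 64-bit (set_ptr, offset) pair for acceleration structures. Dynamic
 * UBO/SSBO descriptors are addressed relative to the push-constant pointer,
 * after the regular push constants.
 */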
static void
visit_vulkan_resource_index(nir_builder *b, apply_layout_state *state, nir_intrinsic_instr *intrin)
{
   unsigned desc_set = nir_intrinsic_desc_set(intrin);
   unsigned binding = nir_intrinsic_binding(intrin);
   struct radv_descriptor_set_layout *layout = state->layout->set[desc_set].layout;
   unsigned offset = layout->binding[binding].offset;
   unsigned stride;

   nir_def *set_ptr;
   if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
       layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
      unsigned idx = state->layout->set[desc_set].dynamic_offset_start + layout->binding[binding].dynamic_offset_offset;
      set_ptr = get_scalar_arg(b, 1, state->args->ac.push_constants);
      offset = state->layout->push_constant_size + idx * 16;
      stride = 16;
   } else {
      set_ptr = load_desc_ptr(b, state, desc_set);
      stride = layout->binding[binding].size;
   }

   nir_def *binding_ptr = nir_imul_imm(b, intrin->src[0].ssa, stride);
   nir_instr_as_alu(binding_ptr->parent_instr)->no_unsigned_wrap = true;

   binding_ptr = nir_iadd_imm(b, binding_ptr, offset);
   nir_instr_as_alu(binding_ptr->parent_instr)->no_unsigned_wrap = true;

   if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR) {
      assert(stride == 16);
      nir_def_rewrite_uses(&intrin->def, nir_pack_64_2x32_split(b, set_ptr, binding_ptr));
   } else {
      nir_def_rewrite_uses(&intrin->def, nir_vec3(b, set_ptr, binding_ptr, nir_imm_int(b, stride)));
   }
   nir_instr_remove(&intrin->instr);
}

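/* Lowers vulkan_resource_reindex: scales the new index by the descriptor
 * stride and adds it to the offset computed by vulkan_resource_index.
 */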
static void
visit_vulkan_resource_reindex(nir_builder *b, apply_layout_state *state, nir_intrinsic_instr *intrin)
{
   VkDescriptorType desc_type = nir_intrinsic_desc_type(intrin);
   if (desc_type == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR) {
      nir_def *set_ptr = nir_unpack_64_2x32_split_x(b, intrin->src[0].ssa);
      nir_def *binding_ptr = nir_unpack_64_2x32_split_y(b, intrin->src[0].ssa);

      nir_def *index = nir_imul_imm(b, intrin->src[1].ssa, 16);
      nir_instr_as_alu(index->parent_instr)->no_unsigned_wrap = true;

      binding_ptr = nir_iadd_nuw(b, binding_ptr, index);

      nir_def_rewrite_uses(&intrin->def, nir_pack_64_2x32_split(b, set_ptr, binding_ptr));
   } else {
      assert(desc_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || desc_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);

      nir_def *binding_ptr = nir_channel(b, intrin->src[0].ssa, 1);
      nir_def *stride = nir_channel(b, intrin->src[0].ssa, 2);

      nir_def *index = nir_imul(b, intrin->src[1].ssa, stride);
      nir_instr_as_alu(index->parent_instr)->no_unsigned_wrap = true;

      binding_ptr = nir_iadd_nuw(b, binding_ptr, index);

      nir_def_rewrite_uses(&intrin->def, nir_vector_insert_imm(b, intrin->src[0].ssa, binding_ptr, 1));
   }
   nir_instr_remove(&intrin->instr);
}

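/* For acceleration structures, loads the 64-bit descriptor from memory; for
 * buffers, zeroes the stride component because it is no longer needed.
 */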
static void
visit_load_vulkan_descriptor(nir_builder *b, apply_layout_state *state, nir_intrinsic_instr *intrin)
{
   if (nir_intrinsic_desc_type(intrin) == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR) {
      nir_def *addr = convert_pointer_to_64_bit(b, state,
                                                nir_iadd(b, nir_unpack_64_2x32_split_x(b, intrin->src[0].ssa),
                                                         nir_unpack_64_2x32_split_y(b, intrin->src[0].ssa)));
      nir_def *desc = nir_build_load_global(b, 1, 64, addr, .access = ACCESS_NON_WRITEABLE);

      nir_def_rewrite_uses(&intrin->def, desc);
   } else {
      nir_def_rewrite_uses(&intrin->def, nir_vector_insert_imm(b, intrin->src[0].ssa, nir_imm_int(b, 0), 2));
   }
   nir_instr_remove(&intrin->instr);
}

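/* Builds a buffer descriptor for an inline uniform block: the 32-bit address
 * of the block becomes dword 0, and dwords 1-3 come from a raw buffer
 * descriptor template that covers the whole 32-bit address space.
 */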
static nir_def *
load_inline_buffer_descriptor(nir_builder *b, apply_layout_state *state, nir_def *rsrc)
{
   uint32_t desc[4];

   ac_build_raw_buffer_descriptor(state->gfx_level, (uint64_t)state->address32_hi << 32, 0xffffffff, desc);

   return nir_vec4(b, rsrc, nir_imm_int(b, desc[1]), nir_imm_int(b, desc[2]), nir_imm_int(b, desc[3]));
}

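/* Lowers the (set_ptr, offset, stride) triple to something the backend can
 * consume: an inline UBO descriptor built on the fly, a 32-bit pointer for
 * non-uniform access (the descriptor is loaded later), or the descriptor
 * itself loaded through SMEM.
 */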
static nir_def *
load_buffer_descriptor(nir_builder *b, apply_layout_state *state, nir_def *rsrc, unsigned access)
{
   nir_binding binding = nir_chase_binding(nir_src_for_ssa(rsrc));

   /* If binding.success=false, then this is a variable pointer, which we don't support with
    * VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK.
    */
   if (binding.success) {
      struct radv_descriptor_set_layout *layout = state->layout->set[binding.desc_set].layout;
      if (layout->binding[binding.binding].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK) {
         rsrc = nir_iadd(b, nir_channel(b, rsrc, 0), nir_channel(b, rsrc, 1));
         return load_inline_buffer_descriptor(b, state, rsrc);
      }
   }

   if (access & ACCESS_NON_UNIFORM)
      return nir_iadd(b, nir_channel(b, rsrc, 0), nir_channel(b, rsrc, 1));

   nir_def *desc_set = convert_pointer_to_64_bit(b, state, nir_channel(b, rsrc, 0));
   return nir_load_smem_amd(b, 4, desc_set, nir_channel(b, rsrc, 1), .align_mul = 16);
}

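/* Lowers get_ssbo_size to a load of the size field of the buffer descriptor. */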
static void
visit_get_ssbo_size(nir_builder *b, apply_layout_state *state, nir_intrinsic_instr *intrin)
{
   nir_def *rsrc = intrin->src[0].ssa;

   nir_def *size;
   if (nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM) {
      nir_def *ptr = nir_iadd(b, nir_channel(b, rsrc, 0), nir_channel(b, rsrc, 1));
      ptr = nir_iadd_imm(b, ptr, 8);
      ptr = convert_pointer_to_64_bit(b, state, ptr);
      size = nir_build_load_global(b, 4, 32, ptr, .access = ACCESS_NON_WRITEABLE | ACCESS_CAN_REORDER, .align_mul = 16,
                                   .align_offset = 4);
   } else {
      /* load the entire descriptor so it can be CSE'd */
      nir_def *ptr = convert_pointer_to_64_bit(b, state, nir_channel(b, rsrc, 0));
      nir_def *desc = nir_load_smem_amd(b, 4, ptr, nir_channel(b, rsrc, 1), .align_mul = 16);
      size = nir_channel(b, desc, 2);
   }

   nir_def_replace(&intrin->def, size);
}

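/* Computes the address of (for non-uniform access) or loads (for uniform
 * access) an image, sampler, buffer or FMASK descriptor for a descriptor
 * deref chain. Immutable samplers are inlined as constants, and several
 * hardware workarounds are applied by patching the loaded descriptor dwords.
 */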
static nir_def *
get_sampler_desc(nir_builder *b, apply_layout_state *state, nir_deref_instr *deref, enum ac_descriptor_type desc_type,
                 bool non_uniform, nir_tex_instr *tex, bool write)
{
   nir_variable *var = nir_deref_instr_get_variable(deref);
   assert(var);
   unsigned desc_set = var->data.descriptor_set;
   unsigned binding_index = var->data.binding;
   bool indirect = nir_deref_instr_has_indirect(deref);

   struct radv_descriptor_set_layout *layout = state->layout->set[desc_set].layout;
   struct radv_descriptor_set_binding_layout *binding = &layout->binding[binding_index];

   /* Handle immutable and embedded (compile-time) samplers
    * (VkDescriptorSetLayoutBinding::pImmutableSamplers). We can only do this for a constant array
    * index or if all samplers in the array are the same. Note that indexing is forbidden with
    * embedded samplers.
    */
   if (desc_type == AC_DESC_SAMPLER && binding->immutable_samplers_offset &&
       (!indirect || binding->immutable_samplers_equal)) {
      unsigned constant_index = 0;
      if (!binding->immutable_samplers_equal) {
         while (deref->deref_type != nir_deref_type_var) {
            assert(deref->deref_type == nir_deref_type_array);
            unsigned array_size = MAX2(glsl_get_aoa_size(deref->type), 1);
            constant_index += nir_src_as_uint(deref->arr.index) * array_size;
            deref = nir_deref_instr_parent(deref);
         }
      }

      uint32_t dword0_mask =
         tex->op == nir_texop_tg4 && state->disable_tg4_trunc_coord ? C_008F30_TRUNC_COORD : 0xffffffffu;
      const uint32_t *samplers = radv_immutable_samplers(layout, binding);
      return nir_imm_ivec4(b, samplers[constant_index * 4 + 0] & dword0_mask, samplers[constant_index * 4 + 1],
                           samplers[constant_index * 4 + 2], samplers[constant_index * 4 + 3]);
   }

   unsigned size = 8;
   unsigned offset = binding->offset;
   switch (desc_type) {
   case AC_DESC_IMAGE:
   case AC_DESC_PLANE_0:
      break;
   case AC_DESC_FMASK:
   case AC_DESC_PLANE_1:
      offset += 32;
      break;
   case AC_DESC_SAMPLER:
      size = 4;
      if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
         offset += radv_combined_image_descriptor_sampler_offset(binding);
      break;
   case AC_DESC_BUFFER:
      size = 4;
      break;
   case AC_DESC_PLANE_2:
      size = 4;
      offset += 64;
      break;
   }

   nir_def *index = NULL;
   while (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);
      unsigned array_size = MAX2(glsl_get_aoa_size(deref->type), 1);
      array_size *= binding->size;

      nir_def *tmp = nir_imul_imm(b, deref->arr.index.ssa, array_size);
      if (tmp != deref->arr.index.ssa)
         nir_instr_as_alu(tmp->parent_instr)->no_unsigned_wrap = true;

      if (index) {
         index = nir_iadd(b, tmp, index);
         nir_instr_as_alu(index->parent_instr)->no_unsigned_wrap = true;
      } else {
         index = tmp;
      }

      deref = nir_deref_instr_parent(deref);
   }

   nir_def *index_offset = index ? nir_iadd_imm(b, index, offset) : nir_imm_int(b, offset);
   if (index && index_offset != index)
      nir_instr_as_alu(index_offset->parent_instr)->no_unsigned_wrap = true;

   if (non_uniform)
      return nir_iadd(b, load_desc_ptr(b, state, desc_set), index_offset);

   nir_def *addr = convert_pointer_to_64_bit(b, state, load_desc_ptr(b, state, desc_set));
   nir_def *desc = nir_load_smem_amd(b, size, addr, index_offset, .align_mul = size * 4u);

   /* 3-plane formats always have the same size and format for planes 1 and 2, so
    * use the tail from plane 1 so that we can store only the first 16 bytes
    * of the last plane. */
   if (desc_type == AC_DESC_PLANE_2) {
      nir_def *desc2 = get_sampler_desc(b, state, deref, AC_DESC_PLANE_1, non_uniform, tex, write);

      nir_def *comp[8];
      for (unsigned i = 0; i < 4; i++)
         comp[i] = nir_channel(b, desc, i);
      for (unsigned i = 4; i < 8; i++)
         comp[i] = nir_channel(b, desc2, i);

      return nir_vec(b, comp, 8);
   } else if (desc_type == AC_DESC_IMAGE && state->has_image_load_dcc_bug && !tex && !write) {
      nir_def *comp[8];
      for (unsigned i = 0; i < 8; i++)
         comp[i] = nir_channel(b, desc, i);

      /* WRITE_COMPRESS_ENABLE must be 0 for all image loads to work around a
       * hardware bug.
       */
      comp[6] = nir_iand_imm(b, comp[6], C_00A018_WRITE_COMPRESS_ENABLE);

      return nir_vec(b, comp, 8);
   } else if (desc_type == AC_DESC_SAMPLER && tex->op == nir_texop_tg4 && state->disable_tg4_trunc_coord) {
      nir_def *comp[4];
      for (unsigned i = 0; i < 4; i++)
         comp[i] = nir_channel(b, desc, i);

      /* We want to always use the linear filtering truncation behaviour for
       * nir_texop_tg4, even if the sampler uses nearest/point filtering.
       */
      comp[0] = nir_iand_imm(b, comp[0], C_008F30_TRUNC_COORD);

      return nir_vec(b, comp, 4);
   }

   return desc;
}

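/* Rewrites the image deref source of an image intrinsic to the loaded
 * descriptor (or its address for non-uniform access).
 */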
static void
update_image_intrinsic(nir_builder *b, apply_layout_state *state, nir_intrinsic_instr *intrin)
{
   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   const enum glsl_sampler_dim dim = glsl_get_sampler_dim(deref->type);
   bool is_load =
      intrin->intrinsic == nir_intrinsic_image_deref_load || intrin->intrinsic == nir_intrinsic_image_deref_sparse_load;

   nir_def *desc = get_sampler_desc(b, state, deref, dim == GLSL_SAMPLER_DIM_BUF ? AC_DESC_BUFFER : AC_DESC_IMAGE,
                                    nir_intrinsic_access(intrin) & ACCESS_NON_UNIFORM, NULL, !is_load);

   if (intrin->intrinsic == nir_intrinsic_image_deref_descriptor_amd) {
      nir_def_replace(&intrin->def, desc);
   } else {
      nir_rewrite_image_intrinsic(intrin, desc, true);
   }
}

static bool
can_increase_load_size(nir_intrinsic_instr *intrin, unsigned offset, unsigned old, unsigned new)
{
   /* Only increase the size of loads if doing so won't extend into a new page/cache-line. */
   unsigned align_mul = MIN2(nir_intrinsic_align_mul(intrin), 64u);
   unsigned end = (nir_intrinsic_align_offset(intrin) + offset + old) & (align_mul - 1);
   return (new - old) <= (align_mul - end);
}

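/* Lowers load_push_constant: dwords covered by inline_push_const_mask are read
 * directly from SGPR arguments; everything else is loaded from the
 * push-constant buffer with power-of-two sized SMEM loads, widened only when
 * that cannot cross into a new page/cache line.
 */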
static nir_def *
load_push_constant(nir_builder *b, apply_layout_state *state, nir_intrinsic_instr *intrin)
{
   unsigned base = nir_intrinsic_base(intrin);
   unsigned bit_size = intrin->def.bit_size;
   unsigned count = intrin->def.num_components * (bit_size / 32u);
   assert(bit_size >= 32);

   nir_def *addr = NULL;
   nir_def *offset = NULL;
   unsigned const_offset = -1;
   if (nir_src_is_const(intrin->src[0]))
      const_offset = (base + nir_src_as_uint(intrin->src[0])) / 4u;

   const unsigned max_push_constant = sizeof(state->args->ac.inline_push_const_mask) * 8u;

   nir_def *data[NIR_MAX_VEC_COMPONENTS * 2];
   unsigned num_loads = 0;
   for (unsigned start = 0; start < count;) {
      /* Try to use inline push constants when possible. */
      unsigned inline_idx = const_offset + start;
      if (const_offset != -1 && inline_idx < max_push_constant &&
          (state->args->ac.inline_push_const_mask & BITFIELD64_BIT(inline_idx))) {
         inline_idx = util_bitcount64(state->args->ac.inline_push_const_mask & BITFIELD64_MASK(inline_idx));
         data[num_loads++] = get_scalar_arg(b, 1, state->args->ac.inline_push_consts[inline_idx]);
         start += 1;
         continue;
      }

      if (!offset) {
         addr = get_scalar_arg(b, 1, state->args->ac.push_constants);
         addr = convert_pointer_to_64_bit(b, state, addr);
         offset = nir_iadd_imm_nuw(b, intrin->src[0].ssa, base);
      }
      unsigned size = 1 << (util_last_bit(count - start) - 1); /* Round down to power of two. */
      /* Try to round up to power of two instead. */
      if (size < (count - start) && can_increase_load_size(intrin, start * 4, size, size * 2))
         size *= 2;

      data[num_loads++] = nir_load_smem_amd(b, size, addr, nir_iadd_imm_nuw(b, offset, start * 4));
      start += size;
   }
   return nir_extract_bits(b, data, num_loads, 0, intrin->def.num_components, bit_size);
}

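/* Dispatches the lowering for a single descriptor-related intrinsic. */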
static void
apply_layout_to_intrin(nir_builder *b, apply_layout_state *state, nir_intrinsic_instr *intrin)
{
   b->cursor = nir_before_instr(&intrin->instr);

   nir_def *rsrc;
   switch (intrin->intrinsic) {
   case nir_intrinsic_vulkan_resource_index:
      visit_vulkan_resource_index(b, state, intrin);
      break;
   case nir_intrinsic_vulkan_resource_reindex:
      visit_vulkan_resource_reindex(b, state, intrin);
      break;
   case nir_intrinsic_load_vulkan_descriptor:
      visit_load_vulkan_descriptor(b, state, intrin);
      break;
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_ssbo_atomic:
   case nir_intrinsic_ssbo_atomic_swap:
      rsrc = load_buffer_descriptor(b, state, intrin->src[0].ssa, nir_intrinsic_access(intrin));
      nir_src_rewrite(&intrin->src[0], rsrc);
      break;
   case nir_intrinsic_store_ssbo:
      rsrc = load_buffer_descriptor(b, state, intrin->src[1].ssa, nir_intrinsic_access(intrin));
      nir_src_rewrite(&intrin->src[1], rsrc);
      break;
   case nir_intrinsic_get_ssbo_size:
      visit_get_ssbo_size(b, state, intrin);
      break;
   case nir_intrinsic_image_deref_load:
   case nir_intrinsic_image_deref_sparse_load:
   case nir_intrinsic_image_deref_store:
   case nir_intrinsic_image_deref_atomic:
   case nir_intrinsic_image_deref_atomic_swap:
   case nir_intrinsic_image_deref_size:
   case nir_intrinsic_image_deref_samples:
   case nir_intrinsic_image_deref_descriptor_amd:
      update_image_intrinsic(b, state, intrin);
      break;
   case nir_intrinsic_load_push_constant: {
      nir_def_replace(&intrin->def, load_push_constant(b, state, intrin));
      break;
   }
   default:
      break;
   }
}

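/* Rewrites texture/sampler deref sources to descriptor handles, choosing the
 * descriptor type from the plane source, the sampler dim and the texture op.
 * On GFX6-GFX7, the sampler's first dword may additionally be masked with the
 * image's last dword to disable anisotropic filtering (see below).
 */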
static void
apply_layout_to_tex(nir_builder *b, apply_layout_state *state, nir_tex_instr *tex)
{
   b->cursor = nir_before_instr(&tex->instr);

   nir_deref_instr *texture_deref_instr = NULL;
   nir_deref_instr *sampler_deref_instr = NULL;
   int plane = -1;

   for (unsigned i = 0; i < tex->num_srcs; i++) {
      switch (tex->src[i].src_type) {
      case nir_tex_src_texture_deref:
         texture_deref_instr = nir_src_as_deref(tex->src[i].src);
         break;
      case nir_tex_src_sampler_deref:
         sampler_deref_instr = nir_src_as_deref(tex->src[i].src);
         break;
      case nir_tex_src_plane:
         plane = nir_src_as_int(tex->src[i].src);
         break;
      default:
         break;
      }
   }

   nir_def *image = NULL;
   nir_def *sampler = NULL;
   if (plane >= 0) {
      assert(tex->op != nir_texop_txf_ms && tex->op != nir_texop_samples_identical);
      assert(tex->sampler_dim != GLSL_SAMPLER_DIM_BUF);
      image =
         get_sampler_desc(b, state, texture_deref_instr, AC_DESC_PLANE_0 + plane, tex->texture_non_uniform, tex, false);
   } else if (tex->sampler_dim == GLSL_SAMPLER_DIM_BUF) {
      image = get_sampler_desc(b, state, texture_deref_instr, AC_DESC_BUFFER, tex->texture_non_uniform, tex, false);
   } else if (tex->op == nir_texop_fragment_mask_fetch_amd) {
      image = get_sampler_desc(b, state, texture_deref_instr, AC_DESC_FMASK, tex->texture_non_uniform, tex, false);
   } else {
      image = get_sampler_desc(b, state, texture_deref_instr, AC_DESC_IMAGE, tex->texture_non_uniform, tex, false);
   }

   if (sampler_deref_instr) {
      sampler = get_sampler_desc(b, state, sampler_deref_instr, AC_DESC_SAMPLER, tex->sampler_non_uniform, tex, false);

      if (state->disable_aniso_single_level && tex->sampler_dim < GLSL_SAMPLER_DIM_RECT && state->gfx_level < GFX8) {
         /* Disable anisotropic filtering if BASE_LEVEL == LAST_LEVEL.
          *
          * GFX6-GFX7:
          *   If BASE_LEVEL == LAST_LEVEL, the shader must disable anisotropic
          *   filtering manually. The driver sets img7 to a mask clearing
          *   MAX_ANISO_RATIO if BASE_LEVEL == LAST_LEVEL. The shader must do:
          *     s_and_b32 samp0, samp0, img7
          *
          * GFX8:
          *   The ANISO_OVERRIDE sampler field enables this fix in TA.
          */
         /* TODO: This is unnecessary for combined image+sampler.
          * We can do this when updating the desc set. */
         nir_def *comp[4];
         for (unsigned i = 0; i < 4; i++)
            comp[i] = nir_channel(b, sampler, i);
         comp[0] = nir_iand(b, comp[0], nir_channel(b, image, 7));

         sampler = nir_vec(b, comp, 4);
      }
   }

   if (tex->op == nir_texop_descriptor_amd) {
      nir_def_replace(&tex->def, image);
      return;
   }

   for (unsigned i = 0; i < tex->num_srcs; i++) {
      switch (tex->src[i].src_type) {
      case nir_tex_src_texture_deref:
         tex->src[i].src_type = nir_tex_src_texture_handle;
         nir_src_rewrite(&tex->src[i].src, image);
         break;
      case nir_tex_src_sampler_deref:
         tex->src[i].src_type = nir_tex_src_sampler_handle;
         nir_src_rewrite(&tex->src[i].src, sampler);
         break;
      default:
         break;
      }
   }
}

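/* Entry point: lowers all descriptor-set, push-constant, image and texture
 * accesses in the shader according to the pipeline layout and the shader ABI.
 */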
void
radv_nir_apply_pipeline_layout(nir_shader *shader, struct radv_device *device, const struct radv_shader_stage *stage)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   const struct radv_instance *instance = radv_physical_device_instance(pdev);

   apply_layout_state state = {
      .gfx_level = pdev->info.gfx_level,
      .address32_hi = pdev->info.address32_hi,
      .disable_aniso_single_level = instance->drirc.disable_aniso_single_level,
      .has_image_load_dcc_bug = pdev->info.has_image_load_dcc_bug,
      .disable_tg4_trunc_coord = !pdev->info.conformant_trunc_coord && !device->disable_trunc_coord,
      .args = &stage->args,
      .info = &stage->info,
      .layout = &stage->layout,
   };

   nir_builder b;

   nir_foreach_function (function, shader) {
      if (!function->impl)
         continue;

      b = nir_builder_create(function->impl);

      /* Iterate in reverse so load_ubo lowering can look at
       * the vulkan_resource_index to tell if it's an inline
       * ubo.
       */
      nir_foreach_block_reverse (block, function->impl) {
         nir_foreach_instr_reverse_safe (instr, block) {
            if (instr->type == nir_instr_type_tex)
               apply_layout_to_tex(&b, &state, nir_instr_as_tex(instr));
            else if (instr->type == nir_instr_type_intrinsic)
               apply_layout_to_intrin(&b, &state, nir_instr_as_intrinsic(instr));
         }
      }

      nir_metadata_preserve(function->impl, nir_metadata_control_flow);
   }
}