1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef ANV_NIR_H
25 #define ANV_NIR_H
26
27 #include "nir/nir.h"
28 #include "anv_private.h"
29
30 #ifdef __cplusplus
31 extern "C" {
32 #endif
33
/* Byte offset of a driver-uniform field within struct anv_push_constants. */
#define anv_drv_const_offset(field) \
   (offsetof(struct anv_push_constants, field))

/* Size in bytes of a driver-uniform field within struct anv_push_constants. */
#define anv_drv_const_size(field) \
   (sizeof(((struct anv_push_constants *)0)->field))

/* Emit a NIR load of `components` components of the driver uniform `field`
 * from the push-constant block. The bit size is derived from the field's
 * byte size (size * 8) and the load range covers all requested components.
 *
 * NOTE: `components` is parenthesized in the range computation so callers
 * may pass an arbitrary expression without `*` precedence mis-binding.
 */
#define anv_load_driver_uniform(b, components, field)                    \
   nir_load_push_constant(b, components,                                 \
                          anv_drv_const_size(field) * 8,                 \
                          nir_imm_int(b, 0),                             \
                          .base = anv_drv_const_offset(field),           \
                          .range = (components) * anv_drv_const_size(field))

/* Same as anv_load_driver_uniform() but indexes into an array field: the
 * push-constant offset is scaled at runtime by `idx` using the size of one
 * array element (field[0]).
 */
#define anv_load_driver_uniform_indexed(b, components, field, idx)       \
   nir_load_push_constant(b, components,                                 \
                          anv_drv_const_size(field[0]) * 8,              \
                          nir_imul_imm(b, idx,                           \
                                       anv_drv_const_size(field[0])),    \
                          .base = anv_drv_const_offset(field),           \
                          .range = anv_drv_const_size(field))
52
53
54
/* This map represents a mapping where the key is the NIR
 * nir_intrinsic_resource_intel::block index. It allows mapping bindless UBO
 * accesses back to their descriptor entry.
 *
 * This map only lives temporarily, between the anv_nir_apply_pipeline_layout()
 * and anv_nir_compute_push_layout() passes.
 */
struct anv_pipeline_push_map {
   /* Number of entries in block_to_descriptor */
   uint32_t block_count;
   /* Array indexed by block index, yielding the matching descriptor binding */
   struct anv_pipeline_binding *block_to_descriptor;
};
66
/* Returns whether primitive replication can be used for the given shaders
 * and view mask (presumably as a cheaper alternative to full multiview
 * lowering — confirm against the pass implementation).
 */
bool anv_check_for_primitive_replication(struct anv_device *device,
                                         VkShaderStageFlags stages,
                                         nir_shader **shaders,
                                         uint32_t view_mask);

/* NIR pass: lowers load_patch_vertices_in; returns true on progress. */
bool anv_nir_lower_load_patch_vertices_in(nir_shader *shader);

/* NIR pass: lowers multiview for the given view mask, optionally using
 * primitive replication; returns true on progress.
 */
bool anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask,
                             bool use_primitive_replication);

/* NIR pass: lowers YCbCr texture accesses using the descriptor set layouts;
 * returns true on progress.
 */
bool anv_nir_lower_ycbcr_textures(nir_shader *shader,
                                  const struct anv_pipeline_sets_layout *layout);
79
80 static inline nir_address_format
anv_nir_ssbo_addr_format(const struct anv_physical_device * pdevice,enum brw_robustness_flags robust_flags)81 anv_nir_ssbo_addr_format(const struct anv_physical_device *pdevice,
82 enum brw_robustness_flags robust_flags)
83 {
84 if (robust_flags & BRW_ROBUSTNESS_SSBO)
85 return nir_address_format_64bit_bounded_global;
86 else
87 return nir_address_format_64bit_global_32bit_offset;
88 }
89
90 static inline nir_address_format
anv_nir_ubo_addr_format(const struct anv_physical_device * pdevice,enum brw_robustness_flags robust_flags)91 anv_nir_ubo_addr_format(const struct anv_physical_device *pdevice,
92 enum brw_robustness_flags robust_flags)
93 {
94 if (robust_flags & BRW_ROBUSTNESS_UBO)
95 return nir_address_format_64bit_bounded_global;
96 else
97 return nir_address_format_64bit_global_32bit_offset;
98 }
99
/* NIR pass: lowers UBO load intrinsics; returns true on progress. */
bool anv_nir_lower_ubo_loads(nir_shader *shader);

/* NIR pass: rewrites resource accesses according to the pipeline's
 * descriptor set layouts, filling `map` with the resulting bindings and
 * `push_map` (allocated from push_map_mem_ctx) with the UBO-block-to-
 * descriptor mapping consumed later by anv_nir_compute_push_layout().
 */
void anv_nir_apply_pipeline_layout(nir_shader *shader,
                                   const struct anv_physical_device *pdevice,
                                   enum brw_robustness_flags robust_flags,
                                   bool independent_sets,
                                   const struct anv_pipeline_sets_layout *layout,
                                   struct anv_pipeline_bind_map *map,
                                   struct anv_pipeline_push_map *push_map,
                                   void *push_map_mem_ctx);

/* NIR pass: computes the push-constant layout for the shader, updating
 * prog_data/map. Consumes the push_map produced by
 * anv_nir_apply_pipeline_layout().
 */
void anv_nir_compute_push_layout(nir_shader *nir,
                                 const struct anv_physical_device *pdevice,
                                 enum brw_robustness_flags robust_flags,
                                 bool fragment_dynamic,
                                 struct brw_stage_prog_data *prog_data,
                                 struct anv_pipeline_bind_map *map,
                                 const struct anv_pipeline_push_map *push_map,
                                 enum anv_descriptor_set_layout_type desc_type,
                                 void *mem_ctx);

/* Validates the computed push-constant layout (presumably a debug check —
 * confirm in the implementation).
 */
void anv_nir_validate_push_layout(const struct anv_physical_device *pdevice,
                                  struct brw_stage_prog_data *prog_data,
                                  struct anv_pipeline_bind_map *map);

/* NIR pass: updates nir_intrinsic_resource_intel block indices; returns
 * true on progress.
 */
bool anv_nir_update_resource_intel_block(nir_shader *shader);

/* NIR pass: lowers resource_intel intrinsics for the given descriptor set
 * layout type; returns true on progress.
 */
bool anv_nir_lower_resource_intel(nir_shader *shader,
                                  const struct anv_physical_device *device,
                                  enum anv_descriptor_set_layout_type desc_type);

/* NIR pass: adds the base workgroup ID to workgroup ID reads; returns true
 * on progress.
 */
bool anv_nir_add_base_work_group_id(nir_shader *shader);

/* Returns a bitmask of the push descriptors used by the shader. */
uint32_t anv_nir_compute_used_push_descriptors(nir_shader *shader,
                                               const struct anv_pipeline_sets_layout *layout);

/* Returns whether the shader loads from the push descriptor buffer. */
bool anv_nir_loads_push_desc_buffer(nir_shader *nir,
                                    const struct anv_pipeline_sets_layout *layout,
                                    const struct anv_pipeline_bind_map *bind_map);

/* Returns a bitmask describing which push-descriptor UBOs were fully
 * promoted to push constants (see the implementation for the exact bit
 * semantics).
 */
uint32_t anv_nir_push_desc_ubo_fully_promoted(nir_shader *nir,
                                              const struct anv_pipeline_sets_layout *layout,
                                              const struct anv_pipeline_bind_map *bind_map);
143
144 #ifdef __cplusplus
145 }
146 #endif
147
148 #endif /* ANV_NIR_H */
149