1 /*
2 * Copyright © 2024 Collabora, Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef VK_SHADER_H
25 #define VK_SHADER_H
26
27 #include "compiler/spirv/nir_spirv.h"
28 #include "vk_limits.h"
29 #include "vk_pipeline_cache.h"
30
31 #include "util/mesa-blake3.h"
32
33 #ifdef __cplusplus
34 extern "C" {
35 #endif
36
37 struct blob;
38 struct nir_shader;
39 struct vk_command_buffer;
40 struct vk_device;
41 struct vk_descriptor_set_layout;
42 struct vk_dynamic_graphics_state;
43 struct vk_graphics_pipeline_state;
44 struct vk_physical_device;
45 struct vk_pipeline;
46 struct vk_pipeline_robustness_state;
47
/** Compare two graphics shader stages for Vulkan pipeline ordering
 *
 * Defines the stage order in which linked shaders are handed to
 * vk_device_shader_ops::compile() (see the compile() documentation below).
 * Presumably returns <0/0/>0 qsort-style — confirm against the definition.
 */
int vk_shader_cmp_graphics_stages(gl_shader_stage a, gl_shader_stage b);

/* Mesa-internal VkShaderCreateFlagsEXT bit: request that the driver capture
 * data for the get_executable_internal_representations() op.  NOTE(review):
 * value is chosen above currently-used spec bits; verify it does not collide
 * with bits added by future VK_EXT_shader_object revisions.
 */
#define VK_SHADER_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_MESA 0x1000
51
/** Per-stage input to vk_device_shader_ops::compile()
 *
 * Bundles one stage's NIR together with the layout and robustness state
 * needed to compile it.  The NIR is consumed by compile() whether or not
 * compilation succeeds (see the compile() documentation below).
 */
struct vk_shader_compile_info {
   /* Stage being compiled */
   gl_shader_stage stage;
   /* May include Mesa-internal bits such as
    * VK_SHADER_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_MESA
    */
   VkShaderCreateFlagsEXT flags;
   /* Stages that may follow this one — presumably mirrors
    * VkShaderCreateInfoEXT::nextStage; confirm at the fill site.
    */
   VkShaderStageFlags next_stage_mask;
   /* Ownership transfers to compile(); freed there on success and failure */
   struct nir_shader *nir;

   const struct vk_pipeline_robustness_state *robustness;

   /* Descriptor set layouts the shader was created against */
   uint32_t set_layout_count;
   struct vk_descriptor_set_layout * const *set_layouts;

   /* Push constant ranges visible to this stage */
   uint32_t push_constant_range_count;
   const VkPushConstantRange *push_constant_ranges;
};
66
67 struct vk_shader_ops;
68
#ifdef __GNUC__
/* Promote -Wpadded to an error: this struct is used as a pipeline-cache key
 * (see vk_shader::pipeline.cache_key), so its byte layout must be fully
 * determined by its fields — compiler-inserted padding would introduce
 * uninitialized bytes into the key.  Presumably the key is hashed/compared
 * as raw memory; confirm in the cache implementation.
 */
#pragma GCC diagnostic push
#pragma GCC diagnostic error "-Wpadded"
#endif
/** Key identifying a cached vk_shader: stage plus a BLAKE3 content hash */
struct vk_shader_pipeline_cache_key {
   gl_shader_stage stage;
   blake3_hash blake3;
};
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
80
/** Base struct for a shader object (VkShaderEXT)
 *
 * Drivers allocate this via vk_shader_zalloc() with a driver-specific size,
 * so driver state follows this struct in memory, and dispatch through the
 * ops table below.
 */
struct vk_shader {
   struct vk_object_base base;

   /* Driver-provided virtual function table; never NULL once constructed */
   const struct vk_shader_ops *ops;

   gl_shader_stage stage;

   /* Used for the generic VkPipeline implementation */
   struct {
      struct vk_pipeline_cache_object cache_obj;
      struct vk_shader_pipeline_cache_key cache_key;
   } pipeline;
};

VK_DEFINE_NONDISP_HANDLE_CASTS(vk_shader, base, VkShaderEXT,
                               VK_OBJECT_TYPE_SHADER_EXT);
97
/** Per-shader virtual function table
 *
 * Filled in by the driver and stored in vk_shader::ops; the common code
 * dispatches through it (e.g. vk_shader_destroy()).
 */
struct vk_shader_ops {
   /** Destroy a vk_shader object, freeing all driver-owned resources */
   void (*destroy)(struct vk_device *device,
                   struct vk_shader *shader,
                   const VkAllocationCallbacks* pAllocator);

   /** Serialize a vk_shader object to a blob
    *
    * This function shouldn't need to do any validation of the blob data
    * beyond basic sanity checking.  The common implementation of
    * vkGetShaderBinaryEXT verifies the blobUUID and version of input data as
    * well as a size and checksum to ensure integrity.  This callback is only
    * invoked after validation of the input binary data.
    *
    * The deserializing counterpart is vk_device_shader_ops::deserialize.
    */
   bool (*serialize)(struct vk_device *device,
                     const struct vk_shader *shader,
                     struct blob *blob);

   /** Returns executable properties for this shader
    *
    * This is equivalent to vkGetPipelineExecutableProperties(), only for a
    * single vk_shader.
    */
   VkResult (*get_executable_properties)(struct vk_device *device,
                                         const struct vk_shader *shader,
                                         uint32_t *executable_count,
                                         VkPipelineExecutablePropertiesKHR *properties);

   /** Returns executable statistics for this shader
    *
    * This is equivalent to vkGetPipelineExecutableStatistics(), only for a
    * single vk_shader.
    */
   VkResult (*get_executable_statistics)(struct vk_device *device,
                                         const struct vk_shader *shader,
                                         uint32_t executable_index,
                                         uint32_t *statistic_count,
                                         VkPipelineExecutableStatisticKHR *statistics);

   /** Returns executable internal representations for this shader
    *
    * This is equivalent to vkGetPipelineExecutableInternalRepresentations(),
    * only for a single vk_shader.
    */
   VkResult (*get_executable_internal_representations)(
      struct vk_device *device,
      const struct vk_shader *shader,
      uint32_t executable_index,
      uint32_t *internal_representation_count,
      VkPipelineExecutableInternalRepresentationKHR *internal_representations);
};
149
/** Allocate and zero a driver shader object of at least `size` bytes
 *
 * Initializes the embedded vk_shader base (stage and ops) — presumably also
 * the vk_object_base; confirm in the definition.  `size` must be at least
 * sizeof(struct vk_shader) so the base fits at the start of the allocation.
 *
 * Returns NULL on allocation failure.  Pair with vk_shader_free() (or the
 * driver's ops->destroy callback, which typically calls it).
 */
void *vk_shader_zalloc(struct vk_device *device,
                       const struct vk_shader_ops *ops,
                       gl_shader_stage stage,
                       const VkAllocationCallbacks *alloc,
                       size_t size);

/** Free a shader previously allocated with vk_shader_zalloc() */
void vk_shader_free(struct vk_device *device,
                    const VkAllocationCallbacks *alloc,
                    struct vk_shader *shader);
158
159 static inline void
vk_shader_destroy(struct vk_device * device,struct vk_shader * shader,const VkAllocationCallbacks * alloc)160 vk_shader_destroy(struct vk_device *device,
161 struct vk_shader *shader,
162 const VkAllocationCallbacks *alloc)
163 {
164 shader->ops->destroy(device, shader, alloc);
165 }
166
/** Device-level shader entry points provided by the driver
 *
 * Compile-side callbacks (options queries, preprocess, compile, deserialize)
 * plus the command-buffer binding hooks used by the common shader-object and
 * pipeline code.
 */
struct vk_device_shader_ops {
   /** Retrieves a NIR compiler options struct
    *
    * NIR compiler options are only allowed to vary based on physical device,
    * stage, and robustness state.
    */
   const struct nir_shader_compiler_options *(*get_nir_options)(
      struct vk_physical_device *device,
      gl_shader_stage stage,
      const struct vk_pipeline_robustness_state *rs);

   /** Retrieves a SPIR-V options struct
    *
    * SPIR-V options are only allowed to vary based on physical device, stage,
    * and robustness state.
    */
   struct spirv_to_nir_options (*get_spirv_options)(
      struct vk_physical_device *device,
      gl_shader_stage stage,
      const struct vk_pipeline_robustness_state *rs);

   /** Preprocesses a NIR shader
    *
    * This callback is optional.
    *
    * If non-NULL, this callback is invoked after the SPIR-V is parsed into
    * NIR and before it is handed to compile().  The driver should do as much
    * generic optimization and lowering as it can here.  Importantly, the
    * preprocess step only knows about the NIR input and the physical device,
    * not any enabled device features or pipeline state.  This allows us to
    * potentially cache this shader and re-use it across pipelines.
    */
   void (*preprocess_nir)(struct vk_physical_device *device, nir_shader *nir);

   /** True if the driver wants geometry stages linked
    *
    * If set to true, geometry stages will always be compiled with
    * VK_SHADER_CREATE_LINK_STAGE_BIT_EXT when pipelines are used.
    */
   bool link_geom_stages;

   /** Hash a vk_graphics_state object
    *
    * This callback hashes whatever bits of vk_graphics_pipeline_state might
    * be used to compile a shader in one of the given stages.  The result is
    * written to blake3_out.
    */
   void (*hash_graphics_state)(struct vk_physical_device *device,
                               const struct vk_graphics_pipeline_state *state,
                               VkShaderStageFlags stages,
                               blake3_hash blake3_out);

   /** Compile (and potentially link) a set of shaders
    *
    * Unlike vkCreateShadersEXT, this callback will only ever be called with
    * multiple shaders if VK_SHADER_CREATE_LINK_STAGE_BIT_EXT is set on all of
    * them.  We also guarantee that the shaders occur in the call in Vulkan
    * pipeline stage order as dictated by vk_shader_cmp_graphics_stages().
    *
    * This callback consumes all input NIR shaders, regardless of whether or
    * not it was successful.
    */
   VkResult (*compile)(struct vk_device *device,
                       uint32_t shader_count,
                       struct vk_shader_compile_info *infos,
                       const struct vk_graphics_pipeline_state *state,
                       const VkAllocationCallbacks* pAllocator,
                       struct vk_shader **shaders_out);

   /** Create a vk_shader from a binary blob
    *
    * Counterpart of vk_shader_ops::serialize; invoked only after the common
    * code has validated the binary (see the serialize documentation).
    */
   VkResult (*deserialize)(struct vk_device *device,
                           struct blob_reader *blob,
                           uint32_t binary_version,
                           const VkAllocationCallbacks* pAllocator,
                           struct vk_shader **shader_out);

   /** Bind a set of shaders
    *
    * This is roughly equivalent to vkCmdBindShadersEXT().  `stages` and
    * `shaders` are parallel arrays of `stage_count` entries.
    */
   void (*cmd_bind_shaders)(struct vk_command_buffer *cmd_buffer,
                            uint32_t stage_count,
                            const gl_shader_stage *stages,
                            struct vk_shader ** const shaders);

   /** Flush dynamic graphics state into the command buffer */
   void (*cmd_set_dynamic_graphics_state)(struct vk_command_buffer *cmd_buffer,
                                          const struct vk_dynamic_graphics_state *state);
};
255
256 #ifdef __cplusplus
257 }
258 #endif
259
260 #endif /* VK_SHADER_H */
261