1 // Copyright (c) 2017 The vulkano developers
2 // Licensed under the Apache License, Version 2.0
3 // <LICENSE-APACHE or
4 // https://www.apache.org/licenses/LICENSE-2.0> or the MIT
5 // license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
6 // at your option. All files in the project carrying such
7 // notice may not be copied, modified, or distributed except
8 // according to those terms.
9 
10 use crate::{
11     buffer::Buffer,
12     descriptor_set::layout::DescriptorType,
13     device::{Device, QueueFlags},
14     image::{sys::Image, ImageAspects, ImageLayout, ImageSubresourceRange},
15     macros::{vulkan_bitflags, vulkan_bitflags_enum},
16     shader::ShaderStages,
17     DeviceSize, RequirementNotMet, Version,
18 };
19 use ahash::HashMap;
20 use once_cell::sync::Lazy;
21 use smallvec::SmallVec;
22 use std::{ops::Range, sync::Arc};
23 
vulkan_bitflags_enum! {
    #[non_exhaustive]
    /// A set of [`PipelineStage`] values.
    PipelineStages impl {
        /// Returns whether `self` contains stages that are only available in
        /// `VkPipelineStageFlagBits2`.
        pub(crate) fn is_2(self) -> bool {
            // Subtract every flag that also exists in the original (synchronization1)
            // `VkPipelineStageFlagBits`; any flag that remains is synchronization2-only.
            !(self
                - (PipelineStages::TOP_OF_PIPE
                    | PipelineStages::DRAW_INDIRECT
                    | PipelineStages::VERTEX_INPUT
                    | PipelineStages::VERTEX_SHADER
                    | PipelineStages::TESSELLATION_CONTROL_SHADER
                    | PipelineStages::TESSELLATION_EVALUATION_SHADER
                    | PipelineStages::GEOMETRY_SHADER
                    | PipelineStages::FRAGMENT_SHADER
                    | PipelineStages::EARLY_FRAGMENT_TESTS
                    | PipelineStages::LATE_FRAGMENT_TESTS
                    | PipelineStages::COLOR_ATTACHMENT_OUTPUT
                    | PipelineStages::COMPUTE_SHADER
                    | PipelineStages::ALL_TRANSFER
                    | PipelineStages::BOTTOM_OF_PIPE
                    | PipelineStages::HOST
                    | PipelineStages::ALL_GRAPHICS
                    | PipelineStages::ALL_COMMANDS
                    | PipelineStages::TRANSFORM_FEEDBACK
                    | PipelineStages::CONDITIONAL_RENDERING
                    | PipelineStages::ACCELERATION_STRUCTURE_BUILD
                    | PipelineStages::RAY_TRACING_SHADER
                    | PipelineStages::FRAGMENT_DENSITY_PROCESS
                    | PipelineStages::FRAGMENT_SHADING_RATE_ATTACHMENT
                    | PipelineStages::COMMAND_PREPROCESS
                    | PipelineStages::TASK_SHADER
                    | PipelineStages::MESH_SHADER))
                .is_empty()
        }

        /// Replaces and unsets flags that are equivalent to multiple other flags.
        ///
        /// This may set flags that are not supported by the device, so this is for internal use only
        /// and should not be passed on to Vulkan.
        pub(crate) fn expand(mut self, queue_flags: QueueFlags) -> Self {
            // `ALL_COMMANDS` expands to every stage supported by the given queue flags.
            if self.intersects(PipelineStages::ALL_COMMANDS) {
                self -= PipelineStages::ALL_COMMANDS;
                self |= queue_flags.into();
            }

            // `ALL_GRAPHICS` expands to every stage a graphics queue supports.
            if self.intersects(PipelineStages::ALL_GRAPHICS) {
                self -= PipelineStages::ALL_GRAPHICS;
                self |= QueueFlags::GRAPHICS.into();
            }

            // `VERTEX_INPUT` covers both index and vertex attribute reads.
            if self.intersects(PipelineStages::VERTEX_INPUT) {
                self -= PipelineStages::VERTEX_INPUT;
                self |= PipelineStages::INDEX_INPUT | PipelineStages::VERTEX_ATTRIBUTE_INPUT;
            }

            // `PRE_RASTERIZATION_SHADERS` covers all shader stages that run before
            // rasterization.
            if self.intersects(PipelineStages::PRE_RASTERIZATION_SHADERS) {
                self -= PipelineStages::PRE_RASTERIZATION_SHADERS;
                self |= PipelineStages::VERTEX_SHADER
                    | PipelineStages::TESSELLATION_CONTROL_SHADER
                    | PipelineStages::TESSELLATION_EVALUATION_SHADER
                    | PipelineStages::GEOMETRY_SHADER
                    | PipelineStages::TASK_SHADER
                    | PipelineStages::MESH_SHADER;
            }

            // `ALL_TRANSFER` covers the individual transfer-type stages.
            if self.intersects(PipelineStages::ALL_TRANSFER) {
                self -= PipelineStages::ALL_TRANSFER;
                self |= PipelineStages::COPY
                    | PipelineStages::RESOLVE
                    | PipelineStages::BLIT
                    | PipelineStages::CLEAR
                    | PipelineStages::ACCELERATION_STRUCTURE_COPY;
            }

            self
        }

        /// Returns `self` with all logically earlier stages (according to `STAGE_ORDER`)
        /// added.
        pub(crate) fn with_earlier(self) -> Self {
            // Walk the order table from last pair to first, so stages added by one step are
            // themselves expanded by the (logically earlier) steps that follow.
            STAGE_ORDER.iter().rev().fold(
                self,
                |stages, &(before, after)| if stages.intersects(after) {
                    stages.union(before)
                } else {
                    stages
                }
            )
        }

        /// Returns `self` with all logically later stages (according to `STAGE_ORDER`)
        /// added.
        pub(crate) fn with_later(self) -> Self {
            // Walk the order table from first pair to last, so stages added by one step are
            // themselves expanded by the (logically later) steps that follow.
            STAGE_ORDER.iter().fold(
                self,
                |stages, &(before, after)| if stages.intersects(before) {
                    stages.union(after)
                } else {
                    stages
                }
            )
        }
    },

    /// A single stage in the device's processing pipeline.
    PipelineStage,

    = PipelineStageFlags2(u64);

    /// A pseudo-stage representing the start of the pipeline.
    TOP_OF_PIPE, TopOfPipe = TOP_OF_PIPE,

    /// Indirect buffers are read.
    DRAW_INDIRECT, DrawIndirect = DRAW_INDIRECT,

    /// Vertex and index buffers are read.
    ///
    /// It is currently equivalent to setting all of the following flags, but automatically
    /// omitting any that are not supported in a given context. It also implicitly includes future
    /// flags that are added to Vulkan, if they are not yet supported by Vulkano.
    /// - `index_input`
    /// - `vertex_attribute_input`
    VERTEX_INPUT, VertexInput = VERTEX_INPUT,

    /// Vertex shaders are executed.
    VERTEX_SHADER, VertexShader = VERTEX_SHADER,

    /// Tessellation control shaders are executed.
    TESSELLATION_CONTROL_SHADER, TessellationControlShader = TESSELLATION_CONTROL_SHADER,

    /// Tessellation evaluation shaders are executed.
    TESSELLATION_EVALUATION_SHADER, TessellationEvaluationShader = TESSELLATION_EVALUATION_SHADER,

    /// Geometry shaders are executed.
    GEOMETRY_SHADER, GeometryShader = GEOMETRY_SHADER,

    /// Fragment shaders are executed.
    FRAGMENT_SHADER, FragmentShader = FRAGMENT_SHADER,

    /// Early fragment tests (depth and stencil tests before fragment shading) are performed.
    /// Subpass load operations for framebuffer attachments with a depth/stencil format are
    /// performed.
    EARLY_FRAGMENT_TESTS, EarlyFragmentTests = EARLY_FRAGMENT_TESTS,

    /// Late fragment tests (depth and stencil tests after fragment shading) are performed.
    /// Subpass store operations for framebuffer attachments with a depth/stencil format are
    /// performed.
    LATE_FRAGMENT_TESTS, LateFragmentTests = LATE_FRAGMENT_TESTS,

    /// The final color values are output from the pipeline after blending.
    /// Subpass load and store operations, multisample resolve operations for framebuffer
    /// attachments with a color or depth/stencil format, and `clear_attachments` are performed.
    COLOR_ATTACHMENT_OUTPUT, ColorAttachmentOutput = COLOR_ATTACHMENT_OUTPUT,

    /// Compute shaders are executed.
    COMPUTE_SHADER, ComputeShader = COMPUTE_SHADER,

    /// The set of all current and future transfer pipeline stages.
    ///
    /// It is currently equivalent to setting all of the following flags, but automatically
    /// omitting any that are not supported in a given context. It also implicitly includes future
    /// flags that are added to Vulkan, if they are not yet supported by Vulkano.
    /// - `copy`
    /// - `blit`
    /// - `resolve`
    /// - `clear`
    /// - `acceleration_structure_copy`
    ALL_TRANSFER, AllTransfer = ALL_TRANSFER,

    /// A pseudo-stage representing the end of the pipeline.
    BOTTOM_OF_PIPE, BottomOfPipe = BOTTOM_OF_PIPE,

    /// A pseudo-stage representing reads and writes to device memory on the host.
    HOST, Host = HOST,

    /// The set of all current and future graphics pipeline stages.
    ///
    /// It is currently equivalent to setting all of the following flags, but automatically
    /// omitting any that are not supported in a given context. It also implicitly includes future
    /// flags that are added to Vulkan, if they are not yet supported by Vulkano.
    /// - `draw_indirect`
    /// - `task_shader`
    /// - `mesh_shader`
    /// - `vertex_input`
    /// - `vertex_shader`
    /// - `tessellation_control_shader`
    /// - `tessellation_evaluation_shader`
    /// - `geometry_shader`
    /// - `fragment_shader`
    /// - `early_fragment_tests`
    /// - `late_fragment_tests`
    /// - `color_attachment_output`
    /// - `conditional_rendering`
    /// - `transform_feedback`
    /// - `fragment_shading_rate_attachment`
    /// - `fragment_density_process`
    /// - `invocation_mask`
    ALL_GRAPHICS, AllGraphics = ALL_GRAPHICS,

    /// The set of all current and future pipeline stages of all types.
    ///
    /// It is currently equivalent to setting all flags in `PipelineStages`, but automatically
    /// omitting any that are not supported in a given context. It also implicitly includes future
    /// flags that are added to Vulkan, if they are not yet supported by Vulkano.
    ALL_COMMANDS, AllCommands = ALL_COMMANDS,

    /// The `copy_buffer`, `copy_image`, `copy_buffer_to_image`, `copy_image_to_buffer` and
    /// `copy_query_pool_results` commands are executed.
    COPY, Copy = COPY {
        api_version: V1_3,
        device_extensions: [khr_synchronization2],
    },

    /// The `resolve_image` command is executed.
    RESOLVE, Resolve = RESOLVE {
        api_version: V1_3,
        device_extensions: [khr_synchronization2],
    },

    /// The `blit_image` command is executed.
    BLIT, Blit = BLIT {
        api_version: V1_3,
        device_extensions: [khr_synchronization2],
    },

    /// The `clear_color_image`, `clear_depth_stencil_image`, `fill_buffer` and `update_buffer`
    /// commands are executed.
    CLEAR, Clear = CLEAR {
        api_version: V1_3,
        device_extensions: [khr_synchronization2],
    },

    /// Index buffers are read.
    INDEX_INPUT, IndexInput = INDEX_INPUT {
        api_version: V1_3,
        device_extensions: [khr_synchronization2],
    },

    /// Vertex buffers are read.
    VERTEX_ATTRIBUTE_INPUT, VertexAttributeInput = VERTEX_ATTRIBUTE_INPUT {
        api_version: V1_3,
        device_extensions: [khr_synchronization2],
    },

    /// The various pre-rasterization shader types are executed.
    ///
    /// It is currently equivalent to setting all of the following flags, but automatically
    /// omitting any that are not supported in a given context. It also implicitly includes future
    /// flags that are added to Vulkan, if they are not yet supported by Vulkano.
    /// - `vertex_shader`
    /// - `tessellation_control_shader`
    /// - `tessellation_evaluation_shader`
    /// - `geometry_shader`
    /// - `task_shader`
    /// - `mesh_shader`
    PRE_RASTERIZATION_SHADERS, PreRasterizationShaders = PRE_RASTERIZATION_SHADERS {
        api_version: V1_3,
        device_extensions: [khr_synchronization2],
    },

    /// Video decode operations are performed.
    VIDEO_DECODE, VideoDecode = VIDEO_DECODE_KHR {
        device_extensions: [khr_video_decode_queue],
    },

    /// Video encode operations are performed.
    VIDEO_ENCODE, VideoEncode = VIDEO_ENCODE_KHR {
        device_extensions: [khr_video_encode_queue],
    },

    /// Vertex attribute output values are written to the transform feedback buffers.
    TRANSFORM_FEEDBACK, TransformFeedback = TRANSFORM_FEEDBACK_EXT {
        device_extensions: [ext_transform_feedback],
    },

    /// The predicate of conditional rendering is read.
    CONDITIONAL_RENDERING, ConditionalRendering = CONDITIONAL_RENDERING_EXT {
        device_extensions: [ext_conditional_rendering],
    },

    /// Acceleration structure commands are executed.
    ACCELERATION_STRUCTURE_BUILD, AccelerationStructureBuild = ACCELERATION_STRUCTURE_BUILD_KHR {
        device_extensions: [khr_acceleration_structure, nv_ray_tracing],
    },

    /// The various ray tracing shader types are executed.
    RAY_TRACING_SHADER, RayTracingShader = RAY_TRACING_SHADER_KHR {
        device_extensions: [khr_ray_tracing_pipeline, nv_ray_tracing],
    },

    /// The fragment density map is read to generate the fragment areas.
    FRAGMENT_DENSITY_PROCESS, FragmentDensityProcess = FRAGMENT_DENSITY_PROCESS_EXT {
        device_extensions: [ext_fragment_density_map],
    },

    /// The fragment shading rate attachment or shading rate image is read to determine the
    /// fragment shading rate for portions of a rasterized primitive.
    FRAGMENT_SHADING_RATE_ATTACHMENT, FragmentShadingRateAttachment = FRAGMENT_SHADING_RATE_ATTACHMENT_KHR {
        device_extensions: [khr_fragment_shading_rate],
    },

    /// Device-side preprocessing for generated commands via the `preprocess_generated_commands`
    /// command is handled.
    COMMAND_PREPROCESS, CommandPreprocess = COMMAND_PREPROCESS_NV {
        device_extensions: [nv_device_generated_commands],
    },

    /// Task shaders are executed.
    TASK_SHADER, TaskShader = TASK_SHADER_EXT {
        device_extensions: [ext_mesh_shader, nv_mesh_shader],
    },

    /// Mesh shaders are executed.
    MESH_SHADER, MeshShader = MESH_SHADER_EXT {
        device_extensions: [ext_mesh_shader, nv_mesh_shader],
    },

    /// Subpass shading shaders are executed.
    SUBPASS_SHADING, SubpassShading = SUBPASS_SHADING_HUAWEI {
        device_extensions: [huawei_subpass_shading],
    },

    /// The invocation mask image is read to optimize ray dispatch.
    INVOCATION_MASK, InvocationMask = INVOCATION_MASK_HUAWEI {
        device_extensions: [huawei_invocation_mask],
    },

    /// The `copy_acceleration_structure` command is executed.
    ACCELERATION_STRUCTURE_COPY, AccelerationStructureCopy = ACCELERATION_STRUCTURE_COPY_KHR {
        device_extensions: [khr_ray_tracing_maintenance1],
    },

    /// Micromap commands are executed.
    MICROMAP_BUILD, MicromapBuild = MICROMAP_BUILD_EXT {
        device_extensions: [ext_opacity_micromap],
    },

    /// Optical flow operations are performed.
    OPTICAL_FLOW, OpticalFlow = OPTICAL_FLOW_NV {
        device_extensions: [nv_optical_flow],
    },
}
364 
// Generates the `STAGE_ORDER` table from `(before, after)` pairs of stage sets.
//
// The static is a slice rather than a fixed-size array, so the element count no longer
// has to be hand-maintained in the macro every time a pair is added to or removed from
// the invocation below.
macro_rules! stage_order {
    {
        $((
            $($before:ident)|+,
            $($after:ident)|+,
        ),)+
    } => {
        /// Pairs `(before, after)` of pipeline stage sets: every stage in `before` is
        /// logically ordered before every stage in `after`.
        static STAGE_ORDER: &[(PipelineStages, PipelineStages)] = &[
            $(
                (
                    PipelineStages::empty()
                    $(.union(PipelineStages::$before))+
                    ,
                    PipelineStages::empty()
                    $(.union(PipelineStages::$after))+
                ),
            )+
        ];
    };
}
385 
386 // Per
387 // https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap7.html#synchronization-pipeline-stages-types
stage_order! {
    // Each tuple is (before, after): every stage listed on the left is logically
    // ordered before every stage listed on the right. Comments are stripped before
    // macro matching, so they are safe inside this invocation.

    // The start-of-pipe pseudo-stage precedes the first "real" stage of every
    // pipeline type (graphics, compute, transfer, video, etc.).
    (
        TOP_OF_PIPE,
        DRAW_INDIRECT
            | COPY | RESOLVE | BLIT | CLEAR
            | VIDEO_DECODE | VIDEO_ENCODE
            | CONDITIONAL_RENDERING
            | COMMAND_PREPROCESS
            | ACCELERATION_STRUCTURE_BUILD
            | SUBPASS_SHADING
            | ACCELERATION_STRUCTURE_COPY
            | MICROMAP_BUILD
            | OPTICAL_FLOW,
    ),

    (
        DRAW_INDIRECT,
        COMPUTE_SHADER | INDEX_INPUT | RAY_TRACING_SHADER | TASK_SHADER,
    ),

    // Graphics pipeline ordering: vertex input through color attachment output.
    (
        INDEX_INPUT,
        VERTEX_ATTRIBUTE_INPUT,
    ),

    (
        VERTEX_ATTRIBUTE_INPUT,
        VERTEX_SHADER,
    ),

    (
        VERTEX_SHADER,
        TESSELLATION_CONTROL_SHADER,
    ),

    (
        TESSELLATION_CONTROL_SHADER,
        TESSELLATION_EVALUATION_SHADER,
    ),

    (
        TESSELLATION_EVALUATION_SHADER,
        GEOMETRY_SHADER,
    ),

    (
        GEOMETRY_SHADER,
        TRANSFORM_FEEDBACK,
    ),

    // Mesh pipeline ordering.
    (
        TASK_SHADER,
        MESH_SHADER,
    ),

    (
        TRANSFORM_FEEDBACK | MESH_SHADER,
        FRAGMENT_SHADING_RATE_ATTACHMENT,
    ),

    (
        FRAGMENT_DENSITY_PROCESS | FRAGMENT_SHADING_RATE_ATTACHMENT,
        EARLY_FRAGMENT_TESTS,
    ),

    (
        EARLY_FRAGMENT_TESTS,
        FRAGMENT_SHADER,
    ),

    (
        FRAGMENT_SHADER,
        LATE_FRAGMENT_TESTS,
    ),

    (
        LATE_FRAGMENT_TESTS,
        COLOR_ATTACHMENT_OUTPUT,
    ),

    // The end-of-pipe pseudo-stage follows the last "real" stage of every
    // pipeline type.
    (
        COLOR_ATTACHMENT_OUTPUT
            | COMPUTE_SHADER
            | COPY | RESOLVE | BLIT | CLEAR
            | VIDEO_DECODE | VIDEO_ENCODE
            | CONDITIONAL_RENDERING
            | COMMAND_PREPROCESS
            | ACCELERATION_STRUCTURE_BUILD | RAY_TRACING_SHADER
            | SUBPASS_SHADING
            | ACCELERATION_STRUCTURE_COPY
            | MICROMAP_BUILD
            | OPTICAL_FLOW,
        BOTTOM_OF_PIPE,
    ),
}
483 
484 impl From<QueueFlags> for PipelineStages {
485     /// Corresponds to the table "[Supported pipeline stage flags]" in the Vulkan specification.
486     ///
487     /// [Supported pipeline stage flags]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap7.html#synchronization-pipeline-stages-supported
488     #[inline]
from(val: QueueFlags) -> Self489     fn from(val: QueueFlags) -> Self {
490         let mut result = PipelineStages::TOP_OF_PIPE
491             | PipelineStages::BOTTOM_OF_PIPE
492             | PipelineStages::HOST
493             | PipelineStages::ALL_COMMANDS;
494 
495         if val.intersects(QueueFlags::GRAPHICS | QueueFlags::COMPUTE | QueueFlags::TRANSFER) {
496             result |= PipelineStages::ALL_TRANSFER
497                 | PipelineStages::COPY
498                 | PipelineStages::RESOLVE
499                 | PipelineStages::BLIT
500                 | PipelineStages::CLEAR
501                 | PipelineStages::ACCELERATION_STRUCTURE_COPY;
502         }
503 
504         if val.intersects(QueueFlags::GRAPHICS) {
505             result |= PipelineStages::DRAW_INDIRECT
506                 | PipelineStages::VERTEX_INPUT
507                 | PipelineStages::VERTEX_SHADER
508                 | PipelineStages::TESSELLATION_CONTROL_SHADER
509                 | PipelineStages::TESSELLATION_EVALUATION_SHADER
510                 | PipelineStages::GEOMETRY_SHADER
511                 | PipelineStages::FRAGMENT_SHADER
512                 | PipelineStages::EARLY_FRAGMENT_TESTS
513                 | PipelineStages::LATE_FRAGMENT_TESTS
514                 | PipelineStages::COLOR_ATTACHMENT_OUTPUT
515                 | PipelineStages::ALL_GRAPHICS
516                 | PipelineStages::INDEX_INPUT
517                 | PipelineStages::VERTEX_ATTRIBUTE_INPUT
518                 | PipelineStages::PRE_RASTERIZATION_SHADERS
519                 | PipelineStages::CONDITIONAL_RENDERING
520                 | PipelineStages::TRANSFORM_FEEDBACK
521                 | PipelineStages::COMMAND_PREPROCESS
522                 | PipelineStages::FRAGMENT_SHADING_RATE_ATTACHMENT
523                 | PipelineStages::TASK_SHADER
524                 | PipelineStages::MESH_SHADER
525                 | PipelineStages::FRAGMENT_DENSITY_PROCESS
526                 | PipelineStages::SUBPASS_SHADING
527                 | PipelineStages::INVOCATION_MASK;
528         }
529 
530         if val.intersects(QueueFlags::COMPUTE) {
531             result |= PipelineStages::DRAW_INDIRECT
532                 | PipelineStages::COMPUTE_SHADER
533                 | PipelineStages::CONDITIONAL_RENDERING
534                 | PipelineStages::COMMAND_PREPROCESS
535                 | PipelineStages::ACCELERATION_STRUCTURE_BUILD
536                 | PipelineStages::RAY_TRACING_SHADER
537                 | PipelineStages::MICROMAP_BUILD;
538         }
539 
540         if val.intersects(QueueFlags::VIDEO_DECODE) {
541             result |= PipelineStages::VIDEO_DECODE;
542         }
543 
544         if val.intersects(QueueFlags::VIDEO_ENCODE) {
545             result |= PipelineStages::VIDEO_ENCODE;
546         }
547 
548         if val.intersects(QueueFlags::OPTICAL_FLOW) {
549             result |= PipelineStages::OPTICAL_FLOW;
550         }
551 
552         result
553     }
554 }
555 
556 impl From<PipelineStage> for ash::vk::PipelineStageFlags {
557     #[inline]
from(val: PipelineStage) -> Self558     fn from(val: PipelineStage) -> Self {
559         Self::from_raw(val as u32)
560     }
561 }
562 
563 impl From<PipelineStages> for ash::vk::PipelineStageFlags {
564     #[inline]
from(val: PipelineStages) -> Self565     fn from(val: PipelineStages) -> Self {
566         Self::from_raw(ash::vk::PipelineStageFlags2::from(val).as_raw() as u32)
567     }
568 }
569 
570 vulkan_bitflags! {
571     #[non_exhaustive]
572 
573     /// A set of memory access types that are included in a memory dependency.
574     AccessFlags impl {
575         /// Returns whether `self` contains stages that are only available in
576         /// `VkAccessFlagBits2`.
577         pub(crate) fn is_2(self) -> bool {
578             !(self
579                 - (AccessFlags::INDIRECT_COMMAND_READ
580                     | AccessFlags::INDEX_READ
581                     | AccessFlags::VERTEX_ATTRIBUTE_READ
582                     | AccessFlags::UNIFORM_READ
583                     | AccessFlags::INPUT_ATTACHMENT_READ
584                     | AccessFlags::SHADER_READ
585                     | AccessFlags::SHADER_WRITE
586                     | AccessFlags::COLOR_ATTACHMENT_READ
587                     | AccessFlags::COLOR_ATTACHMENT_WRITE
588                     | AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ
589                     | AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE
590                     | AccessFlags::TRANSFER_READ
591                     | AccessFlags::TRANSFER_WRITE
592                     | AccessFlags::HOST_READ
593                     | AccessFlags::HOST_WRITE
594                     | AccessFlags::MEMORY_READ
595                     | AccessFlags::MEMORY_WRITE
596                     | AccessFlags::SHADER_SAMPLED_READ
597                     | AccessFlags::SHADER_STORAGE_READ
598                     | AccessFlags::SHADER_STORAGE_WRITE
599                     | AccessFlags::VIDEO_DECODE_READ
600                     | AccessFlags::VIDEO_DECODE_WRITE
601                     | AccessFlags::VIDEO_ENCODE_READ
602                     | AccessFlags::VIDEO_ENCODE_WRITE
603                     | AccessFlags::TRANSFORM_FEEDBACK_WRITE
604                     | AccessFlags::TRANSFORM_FEEDBACK_COUNTER_READ
605                     | AccessFlags::TRANSFORM_FEEDBACK_COUNTER_WRITE
606                     | AccessFlags::CONDITIONAL_RENDERING_READ
607                     | AccessFlags::COMMAND_PREPROCESS_READ
608                     | AccessFlags::COMMAND_PREPROCESS_WRITE
609                     | AccessFlags::FRAGMENT_SHADING_RATE_ATTACHMENT_READ
610                     | AccessFlags::ACCELERATION_STRUCTURE_READ
611                     | AccessFlags::ACCELERATION_STRUCTURE_WRITE
612                     | AccessFlags::FRAGMENT_DENSITY_MAP_READ
613                     | AccessFlags::COLOR_ATTACHMENT_READ_NONCOHERENT
614                     | AccessFlags::INVOCATION_MASK_READ))
615                 .is_empty()
616         }
617 
618         /// Replaces and unsets flags that are equivalent to multiple other flags.
619         ///
620         /// This may set flags that are not supported by the device, so this is for internal use
621         /// only and should not be passed on to Vulkan.
622         #[allow(dead_code)] // TODO: use this function
623         pub(crate) fn expand(mut self) -> Self {
624             if self.intersects(AccessFlags::SHADER_READ) {
625                 self -= AccessFlags::SHADER_READ;
626                 self |= AccessFlags::UNIFORM_READ
627                     | AccessFlags::SHADER_SAMPLED_READ
628                     | AccessFlags::SHADER_STORAGE_READ
629                     | AccessFlags::SHADER_BINDING_TABLE_READ;
630             }
631 
632             if self.intersects(AccessFlags::SHADER_WRITE) {
633                 self -= AccessFlags::SHADER_WRITE;
634                 self |= AccessFlags::SHADER_STORAGE_WRITE;
635             }
636 
637             self
638         }
639     }
640     = AccessFlags2(u64);
641 
642     /// Read access to an indirect buffer.
643     INDIRECT_COMMAND_READ = INDIRECT_COMMAND_READ,
644 
645     /// Read access to an index buffer.
646     INDEX_READ = INDEX_READ,
647 
648     /// Read access to a vertex buffer.
649     VERTEX_ATTRIBUTE_READ = VERTEX_ATTRIBUTE_READ,
650 
651     /// Read access to a uniform buffer in a shader.
652     UNIFORM_READ = UNIFORM_READ,
653 
654     /// Read access to an input attachment in a fragment shader, within a render pass.
655     INPUT_ATTACHMENT_READ = INPUT_ATTACHMENT_READ,
656 
657     /// Read access to a buffer or image in a shader.
658     ///
659     /// It is currently equivalent to setting all of the following flags, but automatically
660     /// omitting any that are not supported in a given context. It also implicitly includes future
661     /// flags that are added to Vulkan, if they are not yet supported by Vulkano.
662     /// - `uniform_read`
663     /// - `shader_sampled_read`
664     /// - `shader_storage_read`
665     /// - `shader_binding_table_read`
666     SHADER_READ = SHADER_READ,
667 
668     /// Write access to a buffer or image in a shader.
669     ///
670     /// It is currently equivalent to `shader_storage_write`. It also implicitly includes future
671     /// flags that are added to Vulkan, if they are not yet supported by Vulkano.
672     SHADER_WRITE = SHADER_WRITE,
673 
674     /// Read access to a color attachment during blending, logic operations or
675     /// subpass load operations.
676     COLOR_ATTACHMENT_READ = COLOR_ATTACHMENT_READ,
677 
678     /// Write access to a color, resolve or depth/stencil resolve attachment during a render pass
679     /// or subpass store operations.
680     COLOR_ATTACHMENT_WRITE = COLOR_ATTACHMENT_WRITE,
681 
682     /// Read access to a depth/stencil attachment during depth/stencil operations or
683     /// subpass load operations.
684     DEPTH_STENCIL_ATTACHMENT_READ = DEPTH_STENCIL_ATTACHMENT_READ,
685 
686     /// Write access to a depth/stencil attachment during depth/stencil operations or
687     /// subpass store operations.
688     DEPTH_STENCIL_ATTACHMENT_WRITE = DEPTH_STENCIL_ATTACHMENT_WRITE,
689 
690     /// Read access to a buffer or image during a copy, blit or resolve command.
691     TRANSFER_READ = TRANSFER_READ,
692 
693     /// Write access to a buffer or image during a copy, blit, resolve or clear command.
694     TRANSFER_WRITE = TRANSFER_WRITE,
695 
696     /// Read access performed by the host.
697     HOST_READ = HOST_READ,
698 
699     /// Write access performed by the host.
700     HOST_WRITE = HOST_WRITE,
701 
702     /// Any type of read access.
703     ///
704     /// This is equivalent to setting all `_read` flags that are allowed in the given context.
705     MEMORY_READ = MEMORY_READ,
706 
707     /// Any type of write access.
708     ///
709     /// This is equivalent to setting all `_write` flags that are allowed in the given context.
710     MEMORY_WRITE = MEMORY_WRITE,
711 
712     /// Read access to a uniform texel buffer or sampled image in a shader.
713     SHADER_SAMPLED_READ = SHADER_SAMPLED_READ {
714         api_version: V1_3,
715         device_extensions: [khr_synchronization2],
716     },
717 
718     /// Read access to a storage buffer, storage texel buffer or storage image in a shader.
719     SHADER_STORAGE_READ = SHADER_STORAGE_READ {
720         api_version: V1_3,
721         device_extensions: [khr_synchronization2],
722     },
723 
724     /// Write access to a storage buffer, storage texel buffer or storage image in a shader.
725     SHADER_STORAGE_WRITE = SHADER_STORAGE_WRITE {
726         api_version: V1_3,
727         device_extensions: [khr_synchronization2],
728     },
729 
730     /// Read access to an image or buffer as part of a video decode operation.
731     VIDEO_DECODE_READ = VIDEO_DECODE_READ_KHR {
732         device_extensions: [khr_video_decode_queue],
733     },
734 
735     /// Write access to an image or buffer as part of a video decode operation.
736     VIDEO_DECODE_WRITE = VIDEO_DECODE_WRITE_KHR {
737         device_extensions: [khr_video_decode_queue],
738     },
739 
740     /// Read access to an image or buffer as part of a video encode operation.
741     VIDEO_ENCODE_READ = VIDEO_ENCODE_READ_KHR {
742         device_extensions: [khr_video_encode_queue],
743     },
744 
745     /// Write access to an image or buffer as part of a video encode operation.
746     VIDEO_ENCODE_WRITE = VIDEO_ENCODE_WRITE_KHR {
747         device_extensions: [khr_video_encode_queue],
748     },
749 
750     /// Write access to a transform feedback buffer during transform feedback operations.
751     TRANSFORM_FEEDBACK_WRITE = TRANSFORM_FEEDBACK_WRITE_EXT {
752         device_extensions: [ext_transform_feedback],
753     },
754 
755     /// Read access to a transform feedback counter buffer during transform feedback operations.
756     TRANSFORM_FEEDBACK_COUNTER_READ = TRANSFORM_FEEDBACK_COUNTER_READ_EXT {
757         device_extensions: [ext_transform_feedback],
758     },
759 
760     /// Write access to a transform feedback counter buffer during transform feedback operations.
761     TRANSFORM_FEEDBACK_COUNTER_WRITE = TRANSFORM_FEEDBACK_COUNTER_WRITE_EXT {
762         device_extensions: [ext_transform_feedback],
763     },
764 
765     /// Read access to a predicate during conditional rendering.
766     CONDITIONAL_RENDERING_READ = CONDITIONAL_RENDERING_READ_EXT {
767         device_extensions: [ext_conditional_rendering],
768     },
769 
770     /// Read access to preprocess buffers input to `preprocess_generated_commands`.
771     COMMAND_PREPROCESS_READ = COMMAND_PREPROCESS_READ_NV {
772         device_extensions: [nv_device_generated_commands],
773     },
774 
    /// Write access to sequences buffers output by `preprocess_generated_commands`.
776     COMMAND_PREPROCESS_WRITE = COMMAND_PREPROCESS_WRITE_NV {
777         device_extensions: [nv_device_generated_commands],
778     },
779 
780     /// Read access to a fragment shading rate attachment during rasterization.
781     FRAGMENT_SHADING_RATE_ATTACHMENT_READ = FRAGMENT_SHADING_RATE_ATTACHMENT_READ_KHR {
782         device_extensions: [khr_fragment_shading_rate],
783     },
784 
785     /// Read access to an acceleration structure or acceleration structure scratch buffer during
786     /// trace, build or copy commands.
787     ACCELERATION_STRUCTURE_READ = ACCELERATION_STRUCTURE_READ_KHR {
788         device_extensions: [khr_acceleration_structure, nv_ray_tracing],
789     },
790 
791     /// Write access to an acceleration structure or acceleration structure scratch buffer during
792     /// trace, build or copy commands.
793     ACCELERATION_STRUCTURE_WRITE = ACCELERATION_STRUCTURE_WRITE_KHR {
794         device_extensions: [khr_acceleration_structure, nv_ray_tracing],
795     },
796 
797     /// Read access to a fragment density map attachment during dynamic fragment density map
798     /// operations.
799     FRAGMENT_DENSITY_MAP_READ = FRAGMENT_DENSITY_MAP_READ_EXT {
800         device_extensions: [ext_fragment_density_map],
801     },
802 
803     /// Read access to color attachments when performing advanced blend operations.
804     COLOR_ATTACHMENT_READ_NONCOHERENT = COLOR_ATTACHMENT_READ_NONCOHERENT_EXT {
805         device_extensions: [ext_blend_operation_advanced],
806     },
807 
808     /// Read access to an invocation mask image.
809     INVOCATION_MASK_READ = INVOCATION_MASK_READ_HUAWEI {
810         device_extensions: [huawei_invocation_mask],
811     },
812 
813     /// Read access to a shader binding table.
814     SHADER_BINDING_TABLE_READ = SHADER_BINDING_TABLE_READ_KHR {
815         device_extensions: [khr_ray_tracing_maintenance1],
816     },
817 
818     /// Read access to a micromap object.
819     MICROMAP_READ = MICROMAP_READ_EXT {
820         device_extensions: [ext_opacity_micromap],
821     },
822 
823     /// Write access to a micromap object.
824     MICROMAP_WRITE = MICROMAP_WRITE_EXT {
825         device_extensions: [ext_opacity_micromap],
826     },
827 
828     /// Read access to a buffer or image during optical flow operations.
829     OPTICAL_FLOW_READ = OPTICAL_FLOW_READ_NV {
830         device_extensions: [nv_optical_flow],
831     },
832 
833     /// Write access to a buffer or image during optical flow operations.
834     OPTICAL_FLOW_WRITE = OPTICAL_FLOW_WRITE_NV {
835         device_extensions: [nv_optical_flow],
836     },
837 }
838 
839 impl From<PipelineStages> for AccessFlags {
840     /// Corresponds to the table "[Supported access types]" in the Vulkan specification.
841     ///
842     /// [Supported access types]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap7.html#synchronization-access-types-supported
843     #[inline]
from(mut val: PipelineStages) -> Self844     fn from(mut val: PipelineStages) -> Self {
845         if val.is_empty() {
846             return AccessFlags::empty();
847         }
848 
849         val = val.expand(QueueFlags::GRAPHICS | QueueFlags::COMPUTE | QueueFlags::TRANSFER);
850         let mut result = AccessFlags::MEMORY_READ | AccessFlags::MEMORY_WRITE;
851 
852         if val.intersects(PipelineStages::DRAW_INDIRECT) {
853             result |=
854                 AccessFlags::INDIRECT_COMMAND_READ | AccessFlags::TRANSFORM_FEEDBACK_COUNTER_READ;
855         }
856 
857         if val.intersects(
858             PipelineStages::VERTEX_SHADER
859                 | PipelineStages::TESSELLATION_CONTROL_SHADER
860                 | PipelineStages::TESSELLATION_EVALUATION_SHADER
861                 | PipelineStages::GEOMETRY_SHADER
862                 | PipelineStages::FRAGMENT_SHADER
863                 | PipelineStages::COMPUTE_SHADER
864                 | PipelineStages::RAY_TRACING_SHADER
865                 | PipelineStages::TASK_SHADER
866                 | PipelineStages::MESH_SHADER,
867         ) {
868             result |= AccessFlags::SHADER_READ
869                 | AccessFlags::UNIFORM_READ
870                 | AccessFlags::SHADER_SAMPLED_READ
871                 | AccessFlags::SHADER_STORAGE_READ
872                 | AccessFlags::SHADER_WRITE
873                 | AccessFlags::SHADER_STORAGE_WRITE
874                 | AccessFlags::ACCELERATION_STRUCTURE_READ;
875         }
876 
877         if val.intersects(PipelineStages::FRAGMENT_SHADER | PipelineStages::SUBPASS_SHADING) {
878             result |= AccessFlags::INPUT_ATTACHMENT_READ;
879         }
880 
881         if val
882             .intersects(PipelineStages::EARLY_FRAGMENT_TESTS | PipelineStages::LATE_FRAGMENT_TESTS)
883         {
884             result |= AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ
885                 | AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE;
886         }
887 
888         if val.intersects(PipelineStages::COLOR_ATTACHMENT_OUTPUT) {
889             result |= AccessFlags::COLOR_ATTACHMENT_READ
890                 | AccessFlags::COLOR_ATTACHMENT_WRITE
891                 | AccessFlags::COLOR_ATTACHMENT_READ_NONCOHERENT;
892         }
893 
894         if val.intersects(PipelineStages::HOST) {
895             result |= AccessFlags::HOST_READ | AccessFlags::HOST_WRITE;
896         }
897 
898         if val.intersects(
899             PipelineStages::COPY
900                 | PipelineStages::RESOLVE
901                 | PipelineStages::BLIT
902                 | PipelineStages::ACCELERATION_STRUCTURE_COPY,
903         ) {
904             result |= AccessFlags::TRANSFER_READ | AccessFlags::TRANSFER_WRITE;
905         }
906 
907         if val.intersects(PipelineStages::CLEAR) {
908             result |= AccessFlags::TRANSFER_WRITE;
909         }
910 
911         if val.intersects(PipelineStages::INDEX_INPUT) {
912             result |= AccessFlags::INDEX_READ;
913         }
914 
915         if val.intersects(PipelineStages::VERTEX_ATTRIBUTE_INPUT) {
916             result |= AccessFlags::VERTEX_ATTRIBUTE_READ;
917         }
918 
919         if val.intersects(PipelineStages::VIDEO_DECODE) {
920             result |= AccessFlags::VIDEO_DECODE_READ | AccessFlags::VIDEO_DECODE_WRITE;
921         }
922 
923         if val.intersects(PipelineStages::VIDEO_ENCODE) {
924             result |= AccessFlags::VIDEO_ENCODE_READ | AccessFlags::VIDEO_ENCODE_WRITE;
925         }
926 
927         if val.intersects(PipelineStages::TRANSFORM_FEEDBACK) {
928             result |= AccessFlags::TRANSFORM_FEEDBACK_WRITE
929                 | AccessFlags::TRANSFORM_FEEDBACK_COUNTER_WRITE
930                 | AccessFlags::TRANSFORM_FEEDBACK_COUNTER_READ;
931         }
932 
933         if val.intersects(PipelineStages::CONDITIONAL_RENDERING) {
934             result |= AccessFlags::CONDITIONAL_RENDERING_READ;
935         }
936 
937         if val.intersects(PipelineStages::ACCELERATION_STRUCTURE_BUILD) {
938             result |= AccessFlags::INDIRECT_COMMAND_READ
939                 | AccessFlags::SHADER_READ
940                 | AccessFlags::SHADER_SAMPLED_READ
941                 | AccessFlags::SHADER_STORAGE_READ
942                 | AccessFlags::SHADER_STORAGE_WRITE
943                 | AccessFlags::TRANSFER_READ
944                 | AccessFlags::TRANSFER_WRITE
945                 | AccessFlags::ACCELERATION_STRUCTURE_READ
946                 | AccessFlags::ACCELERATION_STRUCTURE_WRITE
947                 | AccessFlags::MICROMAP_READ;
948         }
949 
950         if val.intersects(PipelineStages::RAY_TRACING_SHADER) {
951             result |= AccessFlags::SHADER_BINDING_TABLE_READ;
952         }
953 
954         if val.intersects(PipelineStages::FRAGMENT_DENSITY_PROCESS) {
955             result |= AccessFlags::FRAGMENT_DENSITY_MAP_READ;
956         }
957 
958         if val.intersects(PipelineStages::FRAGMENT_SHADING_RATE_ATTACHMENT) {
959             result |= AccessFlags::FRAGMENT_SHADING_RATE_ATTACHMENT_READ;
960         }
961 
962         if val.intersects(PipelineStages::COMMAND_PREPROCESS) {
963             result |= AccessFlags::COMMAND_PREPROCESS_READ | AccessFlags::COMMAND_PREPROCESS_WRITE;
964         }
965 
966         if val.intersects(PipelineStages::INVOCATION_MASK) {
967             result |= AccessFlags::INVOCATION_MASK_READ;
968         }
969 
970         if val.intersects(PipelineStages::MICROMAP_BUILD) {
971             result |= AccessFlags::MICROMAP_READ | AccessFlags::MICROMAP_WRITE;
972         }
973 
974         if val.intersects(PipelineStages::OPTICAL_FLOW) {
975             result |= AccessFlags::OPTICAL_FLOW_READ | AccessFlags::OPTICAL_FLOW_WRITE;
976         }
977 
978         result
979     }
980 }
981 
982 impl From<AccessFlags> for ash::vk::AccessFlags {
983     #[inline]
from(val: AccessFlags) -> Self984     fn from(val: AccessFlags) -> Self {
985         Self::from_raw(ash::vk::AccessFlags2::from(val).as_raw() as u32)
986     }
987 }
988 
/// The full specification of memory access by the pipeline for a particular resource.
///
/// Bundles together the stages a resource is used in, the kinds of access performed there,
/// and whether that access must be exclusive.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub struct PipelineMemoryAccess {
    /// The pipeline stages the resource will be accessed in.
    pub stages: PipelineStages,
    /// The type of memory access that will be performed.
    pub access: AccessFlags,
    /// Whether the resource needs exclusive (mutable) access or can be shared.
    pub exclusive: bool,
}
999 
/// A single combination of a pipeline stage and a type of memory access performed in that
/// stage. Variants are named `<Stage>_<Access>`; `repr(u8)` keeps each value one byte.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[allow(non_camel_case_types, dead_code)]
#[repr(u8)]
pub(crate) enum PipelineStageAccess {
    // There is no stage/access for this, but it is a memory write operation nonetheless.
    ImageLayoutTransition,

    DrawIndirect_IndirectCommandRead,
    DrawIndirect_TransformFeedbackCounterRead,
    VertexShader_UniformRead,
    VertexShader_ShaderSampledRead,
    VertexShader_ShaderStorageRead,
    VertexShader_ShaderStorageWrite,
    VertexShader_AccelerationStructureRead,
    TessellationControlShader_UniformRead,
    TessellationControlShader_ShaderSampledRead,
    TessellationControlShader_ShaderStorageRead,
    TessellationControlShader_ShaderStorageWrite,
    TessellationControlShader_AccelerationStructureRead,
    TessellationEvaluationShader_UniformRead,
    TessellationEvaluationShader_ShaderSampledRead,
    TessellationEvaluationShader_ShaderStorageRead,
    TessellationEvaluationShader_ShaderStorageWrite,
    TessellationEvaluationShader_AccelerationStructureRead,
    GeometryShader_UniformRead,
    GeometryShader_ShaderSampledRead,
    GeometryShader_ShaderStorageRead,
    GeometryShader_ShaderStorageWrite,
    GeometryShader_AccelerationStructureRead,
    FragmentShader_UniformRead,
    FragmentShader_InputAttachmentRead,
    FragmentShader_ShaderSampledRead,
    FragmentShader_ShaderStorageRead,
    FragmentShader_ShaderStorageWrite,
    FragmentShader_AccelerationStructureRead,
    EarlyFragmentTests_DepthStencilAttachmentRead,
    EarlyFragmentTests_DepthStencilAttachmentWrite,
    LateFragmentTests_DepthStencilAttachmentRead,
    LateFragmentTests_DepthStencilAttachmentWrite,
    ColorAttachmentOutput_ColorAttachmentRead,
    ColorAttachmentOutput_ColorAttachmentWrite,
    ColorAttachmentOutput_ColorAttachmentReadNoncoherent,
    ComputeShader_UniformRead,
    ComputeShader_ShaderSampledRead,
    ComputeShader_ShaderStorageRead,
    ComputeShader_ShaderStorageWrite,
    ComputeShader_AccelerationStructureRead,
    Host_HostRead,
    Host_HostWrite,
    Copy_TransferRead,
    Copy_TransferWrite,
    Resolve_TransferRead,
    Resolve_TransferWrite,
    Blit_TransferRead,
    Blit_TransferWrite,
    Clear_TransferWrite,
    IndexInput_IndexRead,
    VertexAttributeInput_VertexAttributeRead,
    VideoDecode_VideoDecodeRead,
    VideoDecode_VideoDecodeWrite,
    VideoEncode_VideoEncodeRead,
    VideoEncode_VideoEncodeWrite,
    TransformFeedback_TransformFeedbackWrite,
    TransformFeedback_TransformFeedbackCounterRead,
    TransformFeedback_TransformFeedbackCounterWrite,
    ConditionalRendering_ConditionalRenderingRead,
    AccelerationStructureBuild_IndirectCommandRead,
    AccelerationStructureBuild_UniformRead,
    AccelerationStructureBuild_TransferRead,
    AccelerationStructureBuild_TransferWrite,
    AccelerationStructureBuild_ShaderSampledRead,
    AccelerationStructureBuild_ShaderStorageRead,
    AccelerationStructureBuild_AccelerationStructureRead,
    AccelerationStructureBuild_AccelerationStructureWrite,
    AccelerationStructureBuild_MicromapRead,
    RayTracingShader_UniformRead,
    RayTracingShader_ShaderSampledRead,
    RayTracingShader_ShaderStorageRead,
    RayTracingShader_ShaderStorageWrite,
    RayTracingShader_AccelerationStructureRead,
    RayTracingShader_ShaderBindingTableRead,
    FragmentDensityProcess_FragmentDensityMapRead,
    FragmentShadingRateAttachment_FragmentShadingRateAttachmentRead,
    CommandPreprocess_CommandPreprocessRead,
    CommandPreprocess_CommandPreprocessWrite,
    TaskShader_UniformRead,
    TaskShader_ShaderSampledRead,
    TaskShader_ShaderStorageRead,
    TaskShader_ShaderStorageWrite,
    TaskShader_AccelerationStructureRead,
    MeshShader_UniformRead,
    MeshShader_ShaderSampledRead,
    MeshShader_ShaderStorageRead,
    MeshShader_ShaderStorageWrite,
    MeshShader_AccelerationStructureRead,
    SubpassShading_InputAttachmentRead,
    InvocationMask_InvocationMaskRead,
    AccelerationStructureCopy_TransferRead,
    AccelerationStructureCopy_TransferWrite,
    OpticalFlow_OpticalFlowRead,
    OpticalFlow_OpticalFlowWrite,
    MicromapBuild_MicromapRead,
    MicromapBuild_MicromapWrite,

    // If there are ever more than 128 preceding values, then there will be a compile error:
    // "discriminant value `128` assigned more than once"
    __MAX_VALUE__ = 128,
}
1108 
1109 impl PipelineStageAccess {
1110     #[inline]
is_write(self) -> bool1111     pub(crate) const fn is_write(self) -> bool {
1112         matches!(
1113             self,
1114             PipelineStageAccess::ImageLayoutTransition
1115                 | PipelineStageAccess::VertexShader_ShaderStorageWrite
1116                 | PipelineStageAccess::TessellationControlShader_ShaderStorageWrite
1117                 | PipelineStageAccess::TessellationEvaluationShader_ShaderStorageWrite
1118                 | PipelineStageAccess::GeometryShader_ShaderStorageWrite
1119                 | PipelineStageAccess::FragmentShader_ShaderStorageWrite
1120                 | PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentWrite
1121                 | PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentWrite
1122                 | PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentWrite
1123                 | PipelineStageAccess::ComputeShader_ShaderStorageWrite
1124                 | PipelineStageAccess::Host_HostWrite
1125                 | PipelineStageAccess::Copy_TransferWrite
1126                 | PipelineStageAccess::Resolve_TransferWrite
1127                 | PipelineStageAccess::Blit_TransferWrite
1128                 | PipelineStageAccess::Clear_TransferWrite
1129                 | PipelineStageAccess::VideoDecode_VideoDecodeWrite
1130                 | PipelineStageAccess::VideoEncode_VideoEncodeWrite
1131                 | PipelineStageAccess::TransformFeedback_TransformFeedbackWrite
1132                 | PipelineStageAccess::TransformFeedback_TransformFeedbackCounterWrite
1133                 | PipelineStageAccess::AccelerationStructureBuild_TransferWrite
1134                 | PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureWrite
1135                 | PipelineStageAccess::RayTracingShader_ShaderStorageWrite
1136                 | PipelineStageAccess::CommandPreprocess_CommandPreprocessWrite
1137                 | PipelineStageAccess::TaskShader_ShaderStorageWrite
1138                 | PipelineStageAccess::MeshShader_ShaderStorageWrite
1139                 | PipelineStageAccess::AccelerationStructureCopy_TransferWrite
1140                 | PipelineStageAccess::OpticalFlow_OpticalFlowWrite
1141                 | PipelineStageAccess::MicromapBuild_MicromapWrite
1142         )
1143     }
1144 
iter_descriptor_stages( descriptor_type: DescriptorType, stages_read: ShaderStages, stages_write: ShaderStages, ) -> impl Iterator<Item = Self> + 'static1145     pub(crate) fn iter_descriptor_stages(
1146         descriptor_type: DescriptorType,
1147         stages_read: ShaderStages,
1148         stages_write: ShaderStages,
1149     ) -> impl Iterator<Item = Self> + 'static {
1150         static MAP_READ: Lazy<
1151             HashMap<DescriptorType, HashMap<PipelineStage, PipelineStageAccess>>,
1152         > = Lazy::new(|| {
1153             let uniform_read = [
1154                 DescriptorType::UniformBuffer,
1155                 DescriptorType::UniformBufferDynamic,
1156             ]
1157             .into_iter()
1158             .map(|descriptor_type| {
1159                 (
1160                     descriptor_type,
1161                     [
1162                         (
1163                             PipelineStage::VertexShader,
1164                             PipelineStageAccess::VertexShader_UniformRead,
1165                         ),
1166                         (
1167                             PipelineStage::TessellationControlShader,
1168                             PipelineStageAccess::TessellationControlShader_UniformRead,
1169                         ),
1170                         (
1171                             PipelineStage::TessellationEvaluationShader,
1172                             PipelineStageAccess::TessellationControlShader_UniformRead,
1173                         ),
1174                         (
1175                             PipelineStage::GeometryShader,
1176                             PipelineStageAccess::GeometryShader_UniformRead,
1177                         ),
1178                         (
1179                             PipelineStage::FragmentShader,
1180                             PipelineStageAccess::FragmentShader_UniformRead,
1181                         ),
1182                         (
1183                             PipelineStage::ComputeShader,
1184                             PipelineStageAccess::ComputeShader_UniformRead,
1185                         ),
1186                         (
1187                             PipelineStage::RayTracingShader,
1188                             PipelineStageAccess::RayTracingShader_UniformRead,
1189                         ),
1190                         (
1191                             PipelineStage::TaskShader,
1192                             PipelineStageAccess::TaskShader_UniformRead,
1193                         ),
1194                         (
1195                             PipelineStage::MeshShader,
1196                             PipelineStageAccess::MeshShader_UniformRead,
1197                         ),
1198                     ]
1199                     .into_iter()
1200                     .collect(),
1201                 )
1202             });
1203 
1204             let shader_sampled_read = [
1205                 DescriptorType::CombinedImageSampler,
1206                 DescriptorType::SampledImage,
1207                 DescriptorType::UniformTexelBuffer,
1208             ]
1209             .into_iter()
1210             .map(|descriptor_type| {
1211                 (
1212                     descriptor_type,
1213                     [
1214                         (
1215                             PipelineStage::VertexShader,
1216                             PipelineStageAccess::VertexShader_ShaderSampledRead,
1217                         ),
1218                         (
1219                             PipelineStage::TessellationControlShader,
1220                             PipelineStageAccess::TessellationControlShader_ShaderSampledRead,
1221                         ),
1222                         (
1223                             PipelineStage::TessellationEvaluationShader,
1224                             PipelineStageAccess::TessellationControlShader_ShaderSampledRead,
1225                         ),
1226                         (
1227                             PipelineStage::GeometryShader,
1228                             PipelineStageAccess::GeometryShader_ShaderSampledRead,
1229                         ),
1230                         (
1231                             PipelineStage::FragmentShader,
1232                             PipelineStageAccess::FragmentShader_ShaderSampledRead,
1233                         ),
1234                         (
1235                             PipelineStage::ComputeShader,
1236                             PipelineStageAccess::ComputeShader_ShaderSampledRead,
1237                         ),
1238                         (
1239                             PipelineStage::RayTracingShader,
1240                             PipelineStageAccess::RayTracingShader_ShaderSampledRead,
1241                         ),
1242                         (
1243                             PipelineStage::TaskShader,
1244                             PipelineStageAccess::TaskShader_ShaderSampledRead,
1245                         ),
1246                         (
1247                             PipelineStage::MeshShader,
1248                             PipelineStageAccess::MeshShader_ShaderSampledRead,
1249                         ),
1250                     ]
1251                     .into_iter()
1252                     .collect(),
1253                 )
1254             });
1255 
1256             let shader_storage_read = [
1257                 DescriptorType::StorageImage,
1258                 DescriptorType::StorageTexelBuffer,
1259                 DescriptorType::StorageBuffer,
1260                 DescriptorType::StorageBufferDynamic,
1261             ]
1262             .into_iter()
1263             .map(|descriptor_type| {
1264                 (
1265                     descriptor_type,
1266                     [
1267                         (
1268                             PipelineStage::VertexShader,
1269                             PipelineStageAccess::VertexShader_ShaderStorageRead,
1270                         ),
1271                         (
1272                             PipelineStage::TessellationControlShader,
1273                             PipelineStageAccess::TessellationControlShader_ShaderStorageRead,
1274                         ),
1275                         (
1276                             PipelineStage::TessellationEvaluationShader,
1277                             PipelineStageAccess::TessellationControlShader_ShaderStorageRead,
1278                         ),
1279                         (
1280                             PipelineStage::GeometryShader,
1281                             PipelineStageAccess::GeometryShader_ShaderStorageRead,
1282                         ),
1283                         (
1284                             PipelineStage::FragmentShader,
1285                             PipelineStageAccess::FragmentShader_ShaderStorageRead,
1286                         ),
1287                         (
1288                             PipelineStage::ComputeShader,
1289                             PipelineStageAccess::ComputeShader_ShaderStorageRead,
1290                         ),
1291                         (
1292                             PipelineStage::RayTracingShader,
1293                             PipelineStageAccess::RayTracingShader_ShaderStorageRead,
1294                         ),
1295                         (
1296                             PipelineStage::TaskShader,
1297                             PipelineStageAccess::TaskShader_ShaderStorageRead,
1298                         ),
1299                         (
1300                             PipelineStage::MeshShader,
1301                             PipelineStageAccess::MeshShader_ShaderStorageRead,
1302                         ),
1303                     ]
1304                     .into_iter()
1305                     .collect(),
1306                 )
1307             });
1308 
1309             let input_attachment_read =
1310                 [DescriptorType::InputAttachment]
1311                     .into_iter()
1312                     .map(|descriptor_type| {
1313                         (
1314                             descriptor_type,
1315                             [(
1316                                 PipelineStage::FragmentShader,
1317                                 PipelineStageAccess::FragmentShader_InputAttachmentRead,
1318                             )]
1319                             .into_iter()
1320                             .collect(),
1321                         )
1322                     });
1323 
1324             uniform_read
1325                 .chain(shader_sampled_read)
1326                 .chain(shader_storage_read)
1327                 .chain(input_attachment_read)
1328                 .collect()
1329         });
1330         static MAP_WRITE: Lazy<
1331             HashMap<DescriptorType, HashMap<PipelineStage, PipelineStageAccess>>,
1332         > = Lazy::new(|| {
1333             let shader_storage_write = [
1334                 DescriptorType::StorageImage,
1335                 DescriptorType::StorageTexelBuffer,
1336                 DescriptorType::StorageBuffer,
1337                 DescriptorType::StorageBufferDynamic,
1338             ]
1339             .into_iter()
1340             .map(|descriptor_type| {
1341                 (
1342                     descriptor_type,
1343                     [
1344                         (
1345                             PipelineStage::VertexShader,
1346                             PipelineStageAccess::VertexShader_ShaderStorageWrite,
1347                         ),
1348                         (
1349                             PipelineStage::TessellationControlShader,
1350                             PipelineStageAccess::TessellationControlShader_ShaderStorageWrite,
1351                         ),
1352                         (
1353                             PipelineStage::TessellationEvaluationShader,
1354                             PipelineStageAccess::TessellationControlShader_ShaderStorageWrite,
1355                         ),
1356                         (
1357                             PipelineStage::GeometryShader,
1358                             PipelineStageAccess::GeometryShader_ShaderStorageWrite,
1359                         ),
1360                         (
1361                             PipelineStage::FragmentShader,
1362                             PipelineStageAccess::FragmentShader_ShaderStorageWrite,
1363                         ),
1364                         (
1365                             PipelineStage::ComputeShader,
1366                             PipelineStageAccess::ComputeShader_ShaderStorageWrite,
1367                         ),
1368                         (
1369                             PipelineStage::RayTracingShader,
1370                             PipelineStageAccess::RayTracingShader_ShaderStorageWrite,
1371                         ),
1372                         (
1373                             PipelineStage::TaskShader,
1374                             PipelineStageAccess::TaskShader_ShaderStorageWrite,
1375                         ),
1376                         (
1377                             PipelineStage::MeshShader,
1378                             PipelineStageAccess::MeshShader_ShaderStorageWrite,
1379                         ),
1380                     ]
1381                     .into_iter()
1382                     .collect(),
1383                 )
1384             });
1385 
1386             shader_storage_write.collect()
1387         });
1388 
1389         [
1390             (stages_read, &*MAP_READ, "read"),
1391             (stages_write, &*MAP_WRITE, "write"),
1392         ]
1393         .into_iter()
1394         .filter(|(stages, _, _)| !stages.is_empty())
1395         .flat_map(move |(stages, descriptor_map, access)| {
1396             let stages_map = descriptor_map.get(&descriptor_type).unwrap_or_else(|| {
1397                 panic!(
1398                     "DescriptorType::{:?} does not {} memory",
1399                     descriptor_type, access,
1400                 )
1401             });
1402 
1403             PipelineStages::from(stages).into_iter().map(move |stage| {
1404                 *stages_map.get(&stage).unwrap_or_else(|| {
1405                     panic!(
1406                         "DescriptorType::{:?} does not {} memory in PipelineStage::{:?}",
1407                         descriptor_type, access, stage,
1408                     )
1409                 })
1410             })
1411         })
1412     }
1413 }
1414 
1415 impl TryFrom<PipelineStageAccess> for PipelineStage {
1416     type Error = ();
1417 
1418     #[inline]
try_from(val: PipelineStageAccess) -> Result<Self, Self::Error>1419     fn try_from(val: PipelineStageAccess) -> Result<Self, Self::Error> {
1420         Ok(match val {
1421             PipelineStageAccess::ImageLayoutTransition => return Err(()),
1422             PipelineStageAccess::DrawIndirect_IndirectCommandRead
1423             | PipelineStageAccess::DrawIndirect_TransformFeedbackCounterRead => PipelineStage::DrawIndirect,
1424             PipelineStageAccess::VertexShader_UniformRead
1425             | PipelineStageAccess::VertexShader_ShaderSampledRead
1426             | PipelineStageAccess::VertexShader_ShaderStorageRead
1427             | PipelineStageAccess::VertexShader_ShaderStorageWrite
1428             | PipelineStageAccess::VertexShader_AccelerationStructureRead => PipelineStage::VertexShader,
1429             PipelineStageAccess::TessellationControlShader_UniformRead
1430             | PipelineStageAccess::TessellationControlShader_ShaderSampledRead
1431             | PipelineStageAccess::TessellationControlShader_ShaderStorageRead
1432             | PipelineStageAccess::TessellationControlShader_ShaderStorageWrite
1433             | PipelineStageAccess::TessellationControlShader_AccelerationStructureRead => PipelineStage::TessellationControlShader,
1434             PipelineStageAccess::TessellationEvaluationShader_UniformRead
1435             | PipelineStageAccess::TessellationEvaluationShader_ShaderSampledRead
1436             | PipelineStageAccess::TessellationEvaluationShader_ShaderStorageRead
1437             | PipelineStageAccess::TessellationEvaluationShader_ShaderStorageWrite
1438             | PipelineStageAccess::TessellationEvaluationShader_AccelerationStructureRead => PipelineStage::TessellationEvaluationShader,
1439             PipelineStageAccess::GeometryShader_UniformRead
1440             | PipelineStageAccess::GeometryShader_ShaderSampledRead
1441             | PipelineStageAccess::GeometryShader_ShaderStorageRead
1442             | PipelineStageAccess::GeometryShader_ShaderStorageWrite
1443             | PipelineStageAccess::GeometryShader_AccelerationStructureRead => PipelineStage::GeometryShader,
1444             PipelineStageAccess::FragmentShader_UniformRead
1445             | PipelineStageAccess::FragmentShader_InputAttachmentRead
1446             | PipelineStageAccess::FragmentShader_ShaderSampledRead
1447             | PipelineStageAccess::FragmentShader_ShaderStorageRead
1448             | PipelineStageAccess::FragmentShader_ShaderStorageWrite
1449             | PipelineStageAccess::FragmentShader_AccelerationStructureRead => PipelineStage::FragmentShader,
1450             PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentRead
1451             | PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentWrite => PipelineStage::EarlyFragmentTests,
1452             PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentRead
1453             | PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentWrite => PipelineStage::LateFragmentTests,
1454             PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentRead
1455             | PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentWrite
1456             | PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentReadNoncoherent => PipelineStage::ColorAttachmentOutput,
1457             PipelineStageAccess::ComputeShader_UniformRead
1458             | PipelineStageAccess::ComputeShader_ShaderSampledRead
1459             | PipelineStageAccess::ComputeShader_ShaderStorageRead
1460             | PipelineStageAccess::ComputeShader_ShaderStorageWrite
1461             | PipelineStageAccess::ComputeShader_AccelerationStructureRead => PipelineStage::ComputeShader,
1462             PipelineStageAccess::Host_HostRead
1463             | PipelineStageAccess::Host_HostWrite => PipelineStage::Host,
1464             PipelineStageAccess::Copy_TransferRead
1465             | PipelineStageAccess::Copy_TransferWrite => PipelineStage::Copy,
1466             PipelineStageAccess::Resolve_TransferRead
1467             | PipelineStageAccess::Resolve_TransferWrite => PipelineStage::Resolve,
1468             PipelineStageAccess::Blit_TransferRead
1469             | PipelineStageAccess::Blit_TransferWrite => PipelineStage::Blit,
1470             PipelineStageAccess::Clear_TransferWrite => PipelineStage::Clear,
1471             PipelineStageAccess::IndexInput_IndexRead => PipelineStage::IndexInput,
1472             PipelineStageAccess::VertexAttributeInput_VertexAttributeRead => PipelineStage::VertexAttributeInput,
1473             PipelineStageAccess::VideoDecode_VideoDecodeRead
1474             | PipelineStageAccess::VideoDecode_VideoDecodeWrite => PipelineStage::VideoDecode,
1475             PipelineStageAccess::VideoEncode_VideoEncodeRead
1476             | PipelineStageAccess::VideoEncode_VideoEncodeWrite => PipelineStage::VideoEncode,
1477             PipelineStageAccess::TransformFeedback_TransformFeedbackWrite
1478             | PipelineStageAccess::TransformFeedback_TransformFeedbackCounterRead
1479             | PipelineStageAccess::TransformFeedback_TransformFeedbackCounterWrite => PipelineStage::TransformFeedback,
1480             PipelineStageAccess::ConditionalRendering_ConditionalRenderingRead => PipelineStage::ConditionalRendering,
1481             PipelineStageAccess::AccelerationStructureBuild_IndirectCommandRead
1482             | PipelineStageAccess::AccelerationStructureBuild_UniformRead
1483             | PipelineStageAccess::AccelerationStructureBuild_TransferRead
1484             | PipelineStageAccess::AccelerationStructureBuild_TransferWrite
1485             | PipelineStageAccess::AccelerationStructureBuild_ShaderSampledRead
1486             | PipelineStageAccess::AccelerationStructureBuild_ShaderStorageRead
1487             | PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureRead
1488             | PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureWrite
1489             | PipelineStageAccess::AccelerationStructureBuild_MicromapRead => PipelineStage::AccelerationStructureBuild,
1490             PipelineStageAccess::RayTracingShader_UniformRead
1491             | PipelineStageAccess::RayTracingShader_ShaderSampledRead
1492             | PipelineStageAccess::RayTracingShader_ShaderStorageRead
1493             | PipelineStageAccess::RayTracingShader_ShaderStorageWrite
1494             | PipelineStageAccess::RayTracingShader_AccelerationStructureRead => PipelineStage::RayTracingShader,
1495             | PipelineStageAccess::RayTracingShader_ShaderBindingTableRead => PipelineStage::RayTracingShader,
1496             PipelineStageAccess::FragmentDensityProcess_FragmentDensityMapRead => PipelineStage::FragmentDensityProcess,
1497             PipelineStageAccess::FragmentShadingRateAttachment_FragmentShadingRateAttachmentRead => PipelineStage::FragmentShadingRateAttachment,
1498             PipelineStageAccess::CommandPreprocess_CommandPreprocessRead
1499             | PipelineStageAccess::CommandPreprocess_CommandPreprocessWrite => PipelineStage::CommandPreprocess,
1500             PipelineStageAccess::TaskShader_UniformRead
1501             | PipelineStageAccess::TaskShader_ShaderSampledRead
1502             | PipelineStageAccess::TaskShader_ShaderStorageRead
1503             | PipelineStageAccess::TaskShader_ShaderStorageWrite
1504             | PipelineStageAccess::TaskShader_AccelerationStructureRead => PipelineStage::TaskShader,
1505             PipelineStageAccess::MeshShader_UniformRead
1506             | PipelineStageAccess::MeshShader_ShaderSampledRead
1507             | PipelineStageAccess::MeshShader_ShaderStorageRead
1508             | PipelineStageAccess::MeshShader_ShaderStorageWrite
1509             | PipelineStageAccess::MeshShader_AccelerationStructureRead => PipelineStage::MeshShader,
1510             PipelineStageAccess::SubpassShading_InputAttachmentRead => PipelineStage::SubpassShading,
1511             PipelineStageAccess::InvocationMask_InvocationMaskRead => PipelineStage::InvocationMask,
1512             PipelineStageAccess::AccelerationStructureCopy_TransferRead
1513             | PipelineStageAccess::AccelerationStructureCopy_TransferWrite => PipelineStage::AccelerationStructureCopy,
1514             PipelineStageAccess::OpticalFlow_OpticalFlowRead
1515             | PipelineStageAccess::OpticalFlow_OpticalFlowWrite => PipelineStage::OpticalFlow,
1516             PipelineStageAccess::MicromapBuild_MicromapRead
1517             | PipelineStageAccess::MicromapBuild_MicromapWrite => PipelineStage::MicromapBuild,
1518             PipelineStageAccess::__MAX_VALUE__ => unreachable!(),
1519         })
1520     }
1521 }
1522 
impl From<PipelineStageAccess> for AccessFlags {
    // Extracts the access-flag half of a (stage, access) pair, discarding the
    // stage. Many stages can perform the same kind of access, so several
    // variants collapse into a single arm.
    #[inline]
    fn from(val: PipelineStageAccess) -> Self {
        match val {
            // A layout transition is pure synchronization; it performs no
            // memory access of its own.
            PipelineStageAccess::ImageLayoutTransition => AccessFlags::empty(),
            PipelineStageAccess::DrawIndirect_IndirectCommandRead
            | PipelineStageAccess::AccelerationStructureBuild_IndirectCommandRead => AccessFlags::INDIRECT_COMMAND_READ,
            PipelineStageAccess::IndexInput_IndexRead => AccessFlags::INDEX_READ,
            PipelineStageAccess::VertexAttributeInput_VertexAttributeRead => AccessFlags::VERTEX_ATTRIBUTE_READ,
            // Uniform reads: every shader stage, plus acceleration-structure
            // builds.
            PipelineStageAccess::VertexShader_UniformRead
            | PipelineStageAccess::TessellationControlShader_UniformRead
            | PipelineStageAccess::TessellationEvaluationShader_UniformRead
            | PipelineStageAccess::GeometryShader_UniformRead
            | PipelineStageAccess::FragmentShader_UniformRead
            | PipelineStageAccess::ComputeShader_UniformRead
            | PipelineStageAccess::AccelerationStructureBuild_UniformRead
            | PipelineStageAccess::RayTracingShader_UniformRead
            | PipelineStageAccess::TaskShader_UniformRead
            | PipelineStageAccess::MeshShader_UniformRead => AccessFlags::UNIFORM_READ,
            PipelineStageAccess::FragmentShader_InputAttachmentRead
            | PipelineStageAccess::SubpassShading_InputAttachmentRead => AccessFlags::INPUT_ATTACHMENT_READ,
            PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentRead => AccessFlags::COLOR_ATTACHMENT_READ,
            PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentWrite => AccessFlags::COLOR_ATTACHMENT_WRITE,
            PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentRead
            | PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentRead => AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ,
            PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentWrite
            | PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentWrite => AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE,
            // All the transfer-like stages share the plain transfer accesses.
            PipelineStageAccess::Copy_TransferRead
            | PipelineStageAccess::Resolve_TransferRead
            | PipelineStageAccess::Blit_TransferRead
            | PipelineStageAccess::AccelerationStructureBuild_TransferRead
            | PipelineStageAccess::AccelerationStructureCopy_TransferRead => AccessFlags::TRANSFER_READ,
            PipelineStageAccess::Copy_TransferWrite
            | PipelineStageAccess::Resolve_TransferWrite
            | PipelineStageAccess::Blit_TransferWrite
            | PipelineStageAccess::Clear_TransferWrite
            | PipelineStageAccess::AccelerationStructureBuild_TransferWrite
            | PipelineStageAccess::AccelerationStructureCopy_TransferWrite => AccessFlags::TRANSFER_WRITE,
            PipelineStageAccess::Host_HostRead => AccessFlags::HOST_READ,
            PipelineStageAccess::Host_HostWrite => AccessFlags::HOST_WRITE,
            PipelineStageAccess::VertexShader_ShaderSampledRead
            | PipelineStageAccess::TessellationControlShader_ShaderSampledRead
            | PipelineStageAccess::TessellationEvaluationShader_ShaderSampledRead
            | PipelineStageAccess::GeometryShader_ShaderSampledRead
            | PipelineStageAccess::FragmentShader_ShaderSampledRead
            | PipelineStageAccess::ComputeShader_ShaderSampledRead
            | PipelineStageAccess::AccelerationStructureBuild_ShaderSampledRead
            | PipelineStageAccess::RayTracingShader_ShaderSampledRead
            | PipelineStageAccess::TaskShader_ShaderSampledRead
            | PipelineStageAccess::MeshShader_ShaderSampledRead => AccessFlags::SHADER_SAMPLED_READ,
            PipelineStageAccess::VertexShader_ShaderStorageRead
            | PipelineStageAccess::TessellationControlShader_ShaderStorageRead
            | PipelineStageAccess::TessellationEvaluationShader_ShaderStorageRead
            | PipelineStageAccess::GeometryShader_ShaderStorageRead
            | PipelineStageAccess::FragmentShader_ShaderStorageRead
            | PipelineStageAccess::ComputeShader_ShaderStorageRead
            | PipelineStageAccess::AccelerationStructureBuild_ShaderStorageRead
            | PipelineStageAccess::RayTracingShader_ShaderStorageRead
            | PipelineStageAccess::TaskShader_ShaderStorageRead
            | PipelineStageAccess::MeshShader_ShaderStorageRead => AccessFlags::SHADER_STORAGE_READ,
            // Storage writes: unlike the read groups above, there is no
            // acceleration-structure-build variant here.
            PipelineStageAccess::VertexShader_ShaderStorageWrite
            | PipelineStageAccess::TessellationControlShader_ShaderStorageWrite
            | PipelineStageAccess::TessellationEvaluationShader_ShaderStorageWrite
            | PipelineStageAccess::GeometryShader_ShaderStorageWrite
            | PipelineStageAccess::FragmentShader_ShaderStorageWrite
            | PipelineStageAccess::ComputeShader_ShaderStorageWrite
            | PipelineStageAccess::RayTracingShader_ShaderStorageWrite
            | PipelineStageAccess::TaskShader_ShaderStorageWrite
            | PipelineStageAccess::MeshShader_ShaderStorageWrite => AccessFlags::SHADER_STORAGE_WRITE,
            PipelineStageAccess::VideoDecode_VideoDecodeRead => AccessFlags::VIDEO_DECODE_READ,
            PipelineStageAccess::VideoDecode_VideoDecodeWrite => AccessFlags::VIDEO_DECODE_WRITE,
            PipelineStageAccess::VideoEncode_VideoEncodeRead => AccessFlags::VIDEO_ENCODE_READ,
            PipelineStageAccess::VideoEncode_VideoEncodeWrite => AccessFlags::VIDEO_ENCODE_WRITE,
            PipelineStageAccess::TransformFeedback_TransformFeedbackWrite => AccessFlags::TRANSFORM_FEEDBACK_WRITE,
            PipelineStageAccess::DrawIndirect_TransformFeedbackCounterRead
            | PipelineStageAccess::TransformFeedback_TransformFeedbackCounterRead => AccessFlags::TRANSFORM_FEEDBACK_COUNTER_READ,
            PipelineStageAccess::TransformFeedback_TransformFeedbackCounterWrite => AccessFlags::TRANSFORM_FEEDBACK_COUNTER_WRITE,
            PipelineStageAccess::ConditionalRendering_ConditionalRenderingRead => AccessFlags::CONDITIONAL_RENDERING_READ,
            PipelineStageAccess::CommandPreprocess_CommandPreprocessRead => AccessFlags::COMMAND_PREPROCESS_READ,
            PipelineStageAccess::CommandPreprocess_CommandPreprocessWrite => AccessFlags::COMMAND_PREPROCESS_WRITE,
            PipelineStageAccess::FragmentShadingRateAttachment_FragmentShadingRateAttachmentRead => AccessFlags::FRAGMENT_SHADING_RATE_ATTACHMENT_READ,
            PipelineStageAccess::VertexShader_AccelerationStructureRead
            | PipelineStageAccess::TessellationControlShader_AccelerationStructureRead
            | PipelineStageAccess::TessellationEvaluationShader_AccelerationStructureRead
            | PipelineStageAccess::GeometryShader_AccelerationStructureRead
            | PipelineStageAccess::FragmentShader_AccelerationStructureRead
            | PipelineStageAccess::ComputeShader_AccelerationStructureRead
            | PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureRead
            | PipelineStageAccess::RayTracingShader_AccelerationStructureRead
            | PipelineStageAccess::TaskShader_AccelerationStructureRead
            | PipelineStageAccess::MeshShader_AccelerationStructureRead => AccessFlags::ACCELERATION_STRUCTURE_READ,
            PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureWrite => AccessFlags::ACCELERATION_STRUCTURE_WRITE,
            PipelineStageAccess::FragmentDensityProcess_FragmentDensityMapRead => AccessFlags::FRAGMENT_DENSITY_MAP_READ,
            PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentReadNoncoherent => AccessFlags::COLOR_ATTACHMENT_READ_NONCOHERENT,
            PipelineStageAccess::InvocationMask_InvocationMaskRead => AccessFlags::INVOCATION_MASK_READ,
            PipelineStageAccess::RayTracingShader_ShaderBindingTableRead => AccessFlags::SHADER_BINDING_TABLE_READ,
            PipelineStageAccess::AccelerationStructureBuild_MicromapRead
            | PipelineStageAccess::MicromapBuild_MicromapRead => AccessFlags::MICROMAP_READ,
            PipelineStageAccess::MicromapBuild_MicromapWrite => AccessFlags::MICROMAP_WRITE,
            PipelineStageAccess::OpticalFlow_OpticalFlowRead => AccessFlags::OPTICAL_FLOW_READ,
            PipelineStageAccess::OpticalFlow_OpticalFlowWrite => AccessFlags::OPTICAL_FLOW_WRITE,
            // Sentinel variant; it is never a legitimate value, so reaching it
            // here is a bug.
            PipelineStageAccess::__MAX_VALUE__ => unreachable!(),
        }
    }
}
1628 
/// A bit set over `PipelineStageAccess` values: bit `i` of the `u128`
/// corresponds to the variant whose discriminant is `i` (see the
/// `From<PipelineStageAccess>` impl, which sets `1u128 << val as u8`).
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash)]
pub(crate) struct PipelineStageAccessSet(u128);
1631 
1632 #[allow(dead_code)]
1633 impl PipelineStageAccessSet {
1634     #[inline]
empty() -> Self1635     pub(crate) const fn empty() -> Self {
1636         Self(0)
1637     }
1638 
1639     #[inline]
count(self) -> u321640     pub(crate) const fn count(self) -> u32 {
1641         self.0.count_ones()
1642     }
1643 
1644     #[inline]
is_empty(self) -> bool1645     pub(crate) const fn is_empty(self) -> bool {
1646         self.0 == 0
1647     }
1648 
1649     #[inline]
intersects(self, other: Self) -> bool1650     pub(crate) const fn intersects(self, other: Self) -> bool {
1651         self.0 & other.0 != 0
1652     }
1653 
1654     #[inline]
contains(self, other: Self) -> bool1655     pub(crate) const fn contains(self, other: Self) -> bool {
1656         self.0 & other.0 == other.0
1657     }
1658 
1659     #[inline]
union(self, other: Self) -> Self1660     pub(crate) const fn union(self, other: Self) -> Self {
1661         Self(self.0 | other.0)
1662     }
1663 
1664     #[inline]
intersection(self, other: Self) -> Self1665     pub(crate) const fn intersection(self, other: Self) -> Self {
1666         Self(self.0 & other.0)
1667     }
1668 
1669     #[inline]
difference(self, other: Self) -> Self1670     pub(crate) const fn difference(self, other: Self) -> Self {
1671         Self(self.0 & !other.0)
1672     }
1673 
1674     #[inline]
symmetric_difference(self, other: Self) -> Self1675     pub(crate) const fn symmetric_difference(self, other: Self) -> Self {
1676         Self(self.0 ^ other.0)
1677     }
1678 
1679     #[inline]
contains_enum(self, val: PipelineStageAccess) -> bool1680     pub(crate) fn contains_enum(self, val: PipelineStageAccess) -> bool {
1681         self.intersects(val.into())
1682     }
1683 }
1684 
1685 impl std::ops::BitAnd for PipelineStageAccessSet {
1686     type Output = Self;
1687 
1688     #[inline]
bitand(self, rhs: Self) -> Self1689     fn bitand(self, rhs: Self) -> Self {
1690         self.intersection(rhs)
1691     }
1692 }
1693 
1694 impl std::ops::BitAndAssign for PipelineStageAccessSet {
1695     #[inline]
bitand_assign(&mut self, rhs: Self)1696     fn bitand_assign(&mut self, rhs: Self) {
1697         *self = self.intersection(rhs);
1698     }
1699 }
1700 
1701 impl std::ops::BitOr for PipelineStageAccessSet {
1702     type Output = Self;
1703 
1704     #[inline]
bitor(self, rhs: Self) -> Self1705     fn bitor(self, rhs: Self) -> Self {
1706         self.union(rhs)
1707     }
1708 }
1709 
1710 impl std::ops::BitOrAssign for PipelineStageAccessSet {
1711     #[inline]
bitor_assign(&mut self, rhs: Self)1712     fn bitor_assign(&mut self, rhs: Self) {
1713         *self = self.union(rhs);
1714     }
1715 }
1716 
1717 impl std::ops::BitXor for PipelineStageAccessSet {
1718     type Output = Self;
1719 
1720     #[inline]
bitxor(self, rhs: Self) -> Self1721     fn bitxor(self, rhs: Self) -> Self {
1722         self.symmetric_difference(rhs)
1723     }
1724 }
1725 
1726 impl std::ops::BitXorAssign for PipelineStageAccessSet {
1727     #[inline]
bitxor_assign(&mut self, rhs: Self)1728     fn bitxor_assign(&mut self, rhs: Self) {
1729         *self = self.symmetric_difference(rhs);
1730     }
1731 }
1732 
1733 impl std::ops::Sub for PipelineStageAccessSet {
1734     type Output = Self;
1735 
1736     #[inline]
sub(self, rhs: Self) -> Self1737     fn sub(self, rhs: Self) -> Self {
1738         self.difference(rhs)
1739     }
1740 }
1741 
1742 impl std::ops::SubAssign for PipelineStageAccessSet {
1743     #[inline]
sub_assign(&mut self, rhs: Self)1744     fn sub_assign(&mut self, rhs: Self) {
1745         *self = self.difference(rhs);
1746     }
1747 }
1748 
1749 impl From<PipelineStageAccess> for PipelineStageAccessSet {
1750     #[inline]
from(val: PipelineStageAccess) -> Self1751     fn from(val: PipelineStageAccess) -> Self {
1752         debug_assert!(val != PipelineStageAccess::__MAX_VALUE__); // You did something very dumb...
1753         Self(1u128 << val as u8)
1754     }
1755 }
1756 
impl From<PipelineStages> for PipelineStageAccessSet {
    /// Expands a stage mask into the set of every stage+access combination
    /// that this module models for the stages present in the mask.
    #[inline]
    fn from(stages: PipelineStages) -> Self {
        let mut result = Self::empty();

        if stages.intersects(PipelineStages::DRAW_INDIRECT) {
            result |= Self::from(PipelineStageAccess::DrawIndirect_IndirectCommandRead)
                | Self::from(PipelineStageAccess::DrawIndirect_TransformFeedbackCounterRead)
        }

        if stages.intersects(PipelineStages::VERTEX_SHADER) {
            result |= Self::from(PipelineStageAccess::VertexShader_UniformRead)
                | Self::from(PipelineStageAccess::VertexShader_ShaderSampledRead)
                | Self::from(PipelineStageAccess::VertexShader_ShaderStorageRead)
                | Self::from(PipelineStageAccess::VertexShader_ShaderStorageWrite)
                | Self::from(PipelineStageAccess::VertexShader_AccelerationStructureRead)
        }

        if stages.intersects(PipelineStages::TESSELLATION_CONTROL_SHADER) {
            result |= Self::from(PipelineStageAccess::TessellationControlShader_UniformRead)
                | Self::from(PipelineStageAccess::TessellationControlShader_ShaderSampledRead)
                | Self::from(PipelineStageAccess::TessellationControlShader_ShaderStorageRead)
                | Self::from(PipelineStageAccess::TessellationControlShader_ShaderStorageWrite)
                | Self::from(
                    PipelineStageAccess::TessellationControlShader_AccelerationStructureRead,
                )
        }

        if stages.intersects(PipelineStages::TESSELLATION_EVALUATION_SHADER) {
            result |= Self::from(PipelineStageAccess::TessellationEvaluationShader_UniformRead)
                | Self::from(PipelineStageAccess::TessellationEvaluationShader_ShaderSampledRead)
                | Self::from(PipelineStageAccess::TessellationEvaluationShader_ShaderStorageRead)
                | Self::from(PipelineStageAccess::TessellationEvaluationShader_ShaderStorageWrite)
                | Self::from(
                    PipelineStageAccess::TessellationEvaluationShader_AccelerationStructureRead,
                )
        }

        if stages.intersects(PipelineStages::GEOMETRY_SHADER) {
            result |= Self::from(PipelineStageAccess::GeometryShader_UniformRead)
                | Self::from(PipelineStageAccess::GeometryShader_ShaderSampledRead)
                | Self::from(PipelineStageAccess::GeometryShader_ShaderStorageRead)
                | Self::from(PipelineStageAccess::GeometryShader_ShaderStorageWrite)
                | Self::from(PipelineStageAccess::GeometryShader_AccelerationStructureRead)
        }

        if stages.intersects(PipelineStages::FRAGMENT_SHADER) {
            // The fragment shader is the only shader stage that can also read
            // input attachments.
            result |= Self::from(PipelineStageAccess::FragmentShader_UniformRead)
                | Self::from(PipelineStageAccess::FragmentShader_InputAttachmentRead)
                | Self::from(PipelineStageAccess::FragmentShader_ShaderSampledRead)
                | Self::from(PipelineStageAccess::FragmentShader_ShaderStorageRead)
                | Self::from(PipelineStageAccess::FragmentShader_ShaderStorageWrite)
                | Self::from(PipelineStageAccess::FragmentShader_AccelerationStructureRead)
        }

        if stages.intersects(PipelineStages::EARLY_FRAGMENT_TESTS) {
            result |= Self::from(PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentRead)
                | Self::from(PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentWrite)
        }

        if stages.intersects(PipelineStages::LATE_FRAGMENT_TESTS) {
            result |= Self::from(PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentRead)
                | Self::from(PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentWrite)
        }

        if stages.intersects(PipelineStages::COLOR_ATTACHMENT_OUTPUT) {
            result |= Self::from(PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentRead)
                | Self::from(PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentWrite)
                | Self::from(
                    PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentReadNoncoherent,
                )
        }

        if stages.intersects(PipelineStages::COMPUTE_SHADER) {
            result |= Self::from(PipelineStageAccess::ComputeShader_UniformRead)
                | Self::from(PipelineStageAccess::ComputeShader_ShaderSampledRead)
                | Self::from(PipelineStageAccess::ComputeShader_ShaderStorageRead)
                | Self::from(PipelineStageAccess::ComputeShader_ShaderStorageWrite)
                | Self::from(PipelineStageAccess::ComputeShader_AccelerationStructureRead)
        }

        if stages.intersects(PipelineStages::HOST) {
            result |= Self::from(PipelineStageAccess::Host_HostRead)
                | Self::from(PipelineStageAccess::Host_HostWrite)
        }

        if stages.intersects(PipelineStages::COPY) {
            result |= Self::from(PipelineStageAccess::Copy_TransferRead)
                | Self::from(PipelineStageAccess::Copy_TransferWrite)
        }

        if stages.intersects(PipelineStages::RESOLVE) {
            result |= Self::from(PipelineStageAccess::Resolve_TransferRead)
                | Self::from(PipelineStageAccess::Resolve_TransferWrite)
        }

        if stages.intersects(PipelineStages::BLIT) {
            result |= Self::from(PipelineStageAccess::Blit_TransferRead)
                | Self::from(PipelineStageAccess::Blit_TransferWrite)
        }

        if stages.intersects(PipelineStages::CLEAR) {
            // Clears only ever write.
            result |= Self::from(PipelineStageAccess::Clear_TransferWrite)
        }

        if stages.intersects(PipelineStages::INDEX_INPUT) {
            result |= Self::from(PipelineStageAccess::IndexInput_IndexRead)
        }

        if stages.intersects(PipelineStages::VERTEX_ATTRIBUTE_INPUT) {
            result |= Self::from(PipelineStageAccess::VertexAttributeInput_VertexAttributeRead)
        }

        if stages.intersects(PipelineStages::VIDEO_DECODE) {
            result |= Self::from(PipelineStageAccess::VideoDecode_VideoDecodeRead)
                | Self::from(PipelineStageAccess::VideoDecode_VideoDecodeWrite)
        }

        if stages.intersects(PipelineStages::VIDEO_ENCODE) {
            result |= Self::from(PipelineStageAccess::VideoEncode_VideoEncodeRead)
                | Self::from(PipelineStageAccess::VideoEncode_VideoEncodeWrite)
        }

        if stages.intersects(PipelineStages::TRANSFORM_FEEDBACK) {
            result |= Self::from(PipelineStageAccess::TransformFeedback_TransformFeedbackWrite)
                | Self::from(PipelineStageAccess::TransformFeedback_TransformFeedbackCounterRead)
                | Self::from(PipelineStageAccess::TransformFeedback_TransformFeedbackCounterWrite)
        }

        if stages.intersects(PipelineStages::CONDITIONAL_RENDERING) {
            result |= Self::from(PipelineStageAccess::ConditionalRendering_ConditionalRenderingRead)
        }

        if stages.intersects(PipelineStages::ACCELERATION_STRUCTURE_BUILD) {
            result |=
                Self::from(PipelineStageAccess::AccelerationStructureBuild_IndirectCommandRead)
                    | Self::from(PipelineStageAccess::AccelerationStructureBuild_UniformRead)
                    | Self::from(PipelineStageAccess::AccelerationStructureBuild_TransferRead)
                    | Self::from(PipelineStageAccess::AccelerationStructureBuild_TransferWrite)
                    | Self::from(PipelineStageAccess::AccelerationStructureBuild_ShaderSampledRead)
                    | Self::from(PipelineStageAccess::AccelerationStructureBuild_ShaderStorageRead)
                    | Self::from(
                        PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureRead,
                    )
                    | Self::from(
                        PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureWrite,
                    )
            // NOTE(review): the micromap-read combination below is deliberately
            // left out — presumably pending micromap support; confirm before
            // enabling.
            // | Self::from(PipelineStageAccess::AccelerationStructureBuild_MicromapRead)
        }

        if stages.intersects(PipelineStages::RAY_TRACING_SHADER) {
            result |= Self::from(PipelineStageAccess::RayTracingShader_UniformRead)
                | Self::from(PipelineStageAccess::RayTracingShader_ShaderSampledRead)
                | Self::from(PipelineStageAccess::RayTracingShader_ShaderStorageRead)
                | Self::from(PipelineStageAccess::RayTracingShader_ShaderStorageWrite)
                | Self::from(PipelineStageAccess::RayTracingShader_AccelerationStructureRead)
            // NOTE(review): shader-binding-table reads are likewise disabled
            // here — confirm whether this is still intentional.
            // | Self::from(PipelineStageAccess::RayTracingShader_ShaderBindingTableRead)
        }

        if stages.intersects(PipelineStages::FRAGMENT_DENSITY_PROCESS) {
            result |= Self::from(PipelineStageAccess::FragmentDensityProcess_FragmentDensityMapRead)
        }

        if stages.intersects(PipelineStages::FRAGMENT_SHADING_RATE_ATTACHMENT) {
            result |=
                PipelineStageAccess::FragmentShadingRateAttachment_FragmentShadingRateAttachmentRead
                    .into()
        }

        if stages.intersects(PipelineStages::COMMAND_PREPROCESS) {
            result |= Self::from(PipelineStageAccess::CommandPreprocess_CommandPreprocessRead)
                | Self::from(PipelineStageAccess::CommandPreprocess_CommandPreprocessWrite)
        }

        if stages.intersects(PipelineStages::TASK_SHADER) {
            result |= Self::from(PipelineStageAccess::TaskShader_UniformRead)
                | Self::from(PipelineStageAccess::TaskShader_ShaderSampledRead)
                | Self::from(PipelineStageAccess::TaskShader_ShaderStorageRead)
                | Self::from(PipelineStageAccess::TaskShader_ShaderStorageWrite)
                | Self::from(PipelineStageAccess::TaskShader_AccelerationStructureRead)
        }

        if stages.intersects(PipelineStages::MESH_SHADER) {
            result |= Self::from(PipelineStageAccess::MeshShader_UniformRead)
                | Self::from(PipelineStageAccess::MeshShader_ShaderSampledRead)
                | Self::from(PipelineStageAccess::MeshShader_ShaderStorageRead)
                | Self::from(PipelineStageAccess::MeshShader_ShaderStorageWrite)
                | Self::from(PipelineStageAccess::MeshShader_AccelerationStructureRead)
        }

        if stages.intersects(PipelineStages::SUBPASS_SHADING) {
            result |= Self::from(PipelineStageAccess::SubpassShading_InputAttachmentRead)
        }

        if stages.intersects(PipelineStages::INVOCATION_MASK) {
            result |= Self::from(PipelineStageAccess::InvocationMask_InvocationMaskRead)
        }

        // NOTE(review): the two stages below have variants defined elsewhere
        // in this file but are not expanded here — presumably pending support;
        // confirm before enabling.
        /*
        if stages.intersects(PipelineStages::OPTICAL_FLOW) {
            result |= Self::from(PipelineStageAccess::OpticalFlow_OpticalFlowRead)
                | Self::from(PipelineStageAccess::OpticalFlow_OpticalFlowWrite)
        }

        if stages.intersects(PipelineStages::MICROMAP_BUILD) {
            result |= Self::from(PipelineStageAccess::MicromapBuild_MicromapWrite)
                | Self::from(PipelineStageAccess::MicromapBuild_MicromapRead)
        }
         */

        result
    }
}
1970 
// Expands a plain `AccessFlags` mask into the set of every
// (pipeline stage, access type) combination represented by those flags:
// each `if` arm handles one access flag and ORs in the corresponding
// `PipelineStageAccess` bit for every pipeline stage listed for it.
impl From<AccessFlags> for PipelineStageAccessSet {
    #[inline]
    fn from(access: AccessFlags) -> Self {
        // Accumulator; each matching arm below ORs more bits into it.
        let mut result = Self::empty();

        if access.intersects(AccessFlags::INDIRECT_COMMAND_READ) {
            result |= Self::from(PipelineStageAccess::DrawIndirect_IndirectCommandRead)
                | Self::from(PipelineStageAccess::AccelerationStructureBuild_IndirectCommandRead)
        }

        if access.intersects(AccessFlags::INDEX_READ) {
            result |= Self::from(PipelineStageAccess::IndexInput_IndexRead)
        }

        if access.intersects(AccessFlags::VERTEX_ATTRIBUTE_READ) {
            result |= Self::from(PipelineStageAccess::VertexAttributeInput_VertexAttributeRead)
        }

        // Uniform reads can happen in any shader stage.
        if access.intersects(AccessFlags::UNIFORM_READ) {
            result |= Self::from(PipelineStageAccess::VertexShader_UniformRead)
                | Self::from(PipelineStageAccess::TessellationControlShader_UniformRead)
                | Self::from(PipelineStageAccess::TessellationEvaluationShader_UniformRead)
                | Self::from(PipelineStageAccess::GeometryShader_UniformRead)
                | Self::from(PipelineStageAccess::FragmentShader_UniformRead)
                | Self::from(PipelineStageAccess::ComputeShader_UniformRead)
                | Self::from(PipelineStageAccess::AccelerationStructureBuild_UniformRead)
                | Self::from(PipelineStageAccess::RayTracingShader_UniformRead)
                | Self::from(PipelineStageAccess::TaskShader_UniformRead)
                | Self::from(PipelineStageAccess::MeshShader_UniformRead)
        }

        if access.intersects(AccessFlags::INPUT_ATTACHMENT_READ) {
            result |= Self::from(PipelineStageAccess::FragmentShader_InputAttachmentRead)
                | Self::from(PipelineStageAccess::SubpassShading_InputAttachmentRead)
        }

        if access.intersects(AccessFlags::COLOR_ATTACHMENT_READ) {
            result |= Self::from(PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentRead)
        }

        if access.intersects(AccessFlags::COLOR_ATTACHMENT_WRITE) {
            result |= Self::from(PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentWrite)
        }

        if access.intersects(AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ) {
            result |= Self::from(PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentRead)
                | Self::from(PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentRead)
        }

        if access.intersects(AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE) {
            result |=
                Self::from(PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentWrite)
                    | Self::from(PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentWrite)
        }

        if access.intersects(AccessFlags::TRANSFER_READ) {
            result |= Self::from(PipelineStageAccess::Copy_TransferRead)
                | Self::from(PipelineStageAccess::Resolve_TransferRead)
                | Self::from(PipelineStageAccess::Blit_TransferRead)
                | Self::from(PipelineStageAccess::AccelerationStructureBuild_TransferRead)
        }

        if access.intersects(AccessFlags::TRANSFER_WRITE) {
            result |= Self::from(PipelineStageAccess::Copy_TransferWrite)
                | Self::from(PipelineStageAccess::Resolve_TransferWrite)
                | Self::from(PipelineStageAccess::Blit_TransferWrite)
                | Self::from(PipelineStageAccess::Clear_TransferWrite)
                | Self::from(PipelineStageAccess::AccelerationStructureBuild_TransferWrite)
        }

        if access.intersects(AccessFlags::HOST_READ) {
            result |= Self::from(PipelineStageAccess::Host_HostRead)
        }

        if access.intersects(AccessFlags::HOST_WRITE) {
            result |= Self::from(PipelineStageAccess::Host_HostWrite)
        }

        if access.intersects(AccessFlags::SHADER_SAMPLED_READ) {
            result |= Self::from(PipelineStageAccess::VertexShader_ShaderSampledRead)
                | Self::from(PipelineStageAccess::TessellationControlShader_ShaderSampledRead)
                | Self::from(PipelineStageAccess::TessellationEvaluationShader_ShaderSampledRead)
                | Self::from(PipelineStageAccess::GeometryShader_ShaderSampledRead)
                | Self::from(PipelineStageAccess::FragmentShader_ShaderSampledRead)
                | Self::from(PipelineStageAccess::ComputeShader_ShaderSampledRead)
                | Self::from(PipelineStageAccess::AccelerationStructureBuild_ShaderSampledRead)
                | Self::from(PipelineStageAccess::RayTracingShader_ShaderSampledRead)
                | Self::from(PipelineStageAccess::TaskShader_ShaderSampledRead)
                | Self::from(PipelineStageAccess::MeshShader_ShaderSampledRead)
        }

        if access.intersects(AccessFlags::SHADER_STORAGE_READ) {
            result |= Self::from(PipelineStageAccess::VertexShader_ShaderStorageRead)
                | Self::from(PipelineStageAccess::TessellationControlShader_ShaderStorageRead)
                | Self::from(PipelineStageAccess::TessellationEvaluationShader_ShaderStorageRead)
                | Self::from(PipelineStageAccess::GeometryShader_ShaderStorageRead)
                | Self::from(PipelineStageAccess::FragmentShader_ShaderStorageRead)
                | Self::from(PipelineStageAccess::ComputeShader_ShaderStorageRead)
                | Self::from(PipelineStageAccess::AccelerationStructureBuild_ShaderStorageRead)
                | Self::from(PipelineStageAccess::RayTracingShader_ShaderStorageRead)
                | Self::from(PipelineStageAccess::TaskShader_ShaderStorageRead)
                | Self::from(PipelineStageAccess::MeshShader_ShaderStorageRead)
        }

        // Unlike the read arm above, this deliberately has no
        // `AccelerationStructureBuild` entry: that stage only reads storage.
        if access.intersects(AccessFlags::SHADER_STORAGE_WRITE) {
            result |= Self::from(PipelineStageAccess::VertexShader_ShaderStorageWrite)
                | Self::from(PipelineStageAccess::TessellationControlShader_ShaderStorageWrite)
                | Self::from(PipelineStageAccess::TessellationEvaluationShader_ShaderStorageWrite)
                | Self::from(PipelineStageAccess::GeometryShader_ShaderStorageWrite)
                | Self::from(PipelineStageAccess::FragmentShader_ShaderStorageWrite)
                | Self::from(PipelineStageAccess::ComputeShader_ShaderStorageWrite)
                | Self::from(PipelineStageAccess::RayTracingShader_ShaderStorageWrite)
                | Self::from(PipelineStageAccess::TaskShader_ShaderStorageWrite)
                | Self::from(PipelineStageAccess::MeshShader_ShaderStorageWrite)
        }

        if access.intersects(AccessFlags::VIDEO_DECODE_READ) {
            result |= Self::from(PipelineStageAccess::VideoDecode_VideoDecodeRead)
        }

        if access.intersects(AccessFlags::VIDEO_DECODE_WRITE) {
            result |= Self::from(PipelineStageAccess::VideoDecode_VideoDecodeWrite)
        }

        if access.intersects(AccessFlags::VIDEO_ENCODE_READ) {
            result |= Self::from(PipelineStageAccess::VideoEncode_VideoEncodeRead)
        }

        if access.intersects(AccessFlags::VIDEO_ENCODE_WRITE) {
            result |= Self::from(PipelineStageAccess::VideoEncode_VideoEncodeWrite)
        }

        if access.intersects(AccessFlags::TRANSFORM_FEEDBACK_WRITE) {
            result |= Self::from(PipelineStageAccess::TransformFeedback_TransformFeedbackWrite)
        }

        if access.intersects(AccessFlags::TRANSFORM_FEEDBACK_COUNTER_READ) {
            result |= Self::from(PipelineStageAccess::DrawIndirect_TransformFeedbackCounterRead)
                | Self::from(PipelineStageAccess::TransformFeedback_TransformFeedbackCounterRead)
        }

        if access.intersects(AccessFlags::TRANSFORM_FEEDBACK_COUNTER_WRITE) {
            result |=
                Self::from(PipelineStageAccess::TransformFeedback_TransformFeedbackCounterWrite)
        }

        if access.intersects(AccessFlags::CONDITIONAL_RENDERING_READ) {
            result |= Self::from(PipelineStageAccess::ConditionalRendering_ConditionalRenderingRead)
        }

        if access.intersects(AccessFlags::COMMAND_PREPROCESS_READ) {
            result |= Self::from(PipelineStageAccess::CommandPreprocess_CommandPreprocessRead)
        }

        if access.intersects(AccessFlags::COMMAND_PREPROCESS_WRITE) {
            result |= Self::from(PipelineStageAccess::CommandPreprocess_CommandPreprocessWrite)
        }

        if access.intersects(AccessFlags::FRAGMENT_SHADING_RATE_ATTACHMENT_READ) {
            result |=
                Self::from(PipelineStageAccess::FragmentShadingRateAttachment_FragmentShadingRateAttachmentRead)
        }

        if access.intersects(AccessFlags::ACCELERATION_STRUCTURE_READ) {
            result |= Self::from(PipelineStageAccess::VertexShader_AccelerationStructureRead)
                | Self::from(
                    PipelineStageAccess::TessellationControlShader_AccelerationStructureRead,
                )
                | Self::from(
                    PipelineStageAccess::TessellationEvaluationShader_AccelerationStructureRead,
                )
                | Self::from(PipelineStageAccess::GeometryShader_AccelerationStructureRead)
                | Self::from(PipelineStageAccess::FragmentShader_AccelerationStructureRead)
                | Self::from(PipelineStageAccess::ComputeShader_AccelerationStructureRead)
                | Self::from(
                    PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureRead,
                )
                | Self::from(PipelineStageAccess::RayTracingShader_AccelerationStructureRead)
                | Self::from(PipelineStageAccess::TaskShader_AccelerationStructureRead)
                | Self::from(PipelineStageAccess::MeshShader_AccelerationStructureRead)
        }

        if access.intersects(AccessFlags::ACCELERATION_STRUCTURE_WRITE) {
            result |= Self::from(
                PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureWrite,
            )
        }

        if access.intersects(AccessFlags::FRAGMENT_DENSITY_MAP_READ) {
            result |= Self::from(PipelineStageAccess::FragmentDensityProcess_FragmentDensityMapRead)
        }

        if access.intersects(AccessFlags::COLOR_ATTACHMENT_READ_NONCOHERENT) {
            result |= Self::from(
                PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentReadNoncoherent,
            )
        }

        if access.intersects(AccessFlags::INVOCATION_MASK_READ) {
            result |= Self::from(PipelineStageAccess::InvocationMask_InvocationMaskRead)
        }

        // NOTE(review): the arms below are commented out, presumably because the
        // corresponding `PipelineStageAccess` variants are not yet exposed (the
        // stage-based conversion above comments out the same extensions) — confirm
        // before enabling.
        /*
        if access.intersects(AccessFlags::SHADER_BINDING_TABLE_READ) {
            result |= Self::from(PipelineStageAccess::RayTracingShader_ShaderBindingTableRead)
        }

        if access.intersects(AccessFlags::MICROMAP_READ) {
            result |= Self::from(PipelineStageAccess::AccelerationStructureBuild_MicromapRead)
                | Self::from(PipelineStageAccess::MicromapBuild_MicromapRead)
        }

        if access.intersects(AccessFlags::MICROMAP_WRITE) {
            result |= Self::from(PipelineStageAccess::MicromapBuild_MicromapWrite)
        }

        if access.intersects(AccessFlags::OPTICAL_FLOW_READ) {
            result |= Self::from(PipelineStageAccess::OpticalFlow_OpticalFlowRead)
        }

        if access.intersects(AccessFlags::OPTICAL_FLOW_WRITE) {
            result |= Self::from(PipelineStageAccess::OpticalFlow_OpticalFlowWrite)
        }
         */

        result
    }
}
2199 
/// Dependency info for barriers in a pipeline barrier or event command.
///
/// A pipeline barrier creates a dependency between commands submitted before the barrier (the
/// source scope) and commands submitted after it (the destination scope). An event command acts
/// like a split pipeline barrier: the source scope and destination scope are defined
/// relative to different commands. Each `DependencyInfo` consists of multiple individual barriers
/// that either concern a single resource or operate globally.
///
/// Each barrier has a set of source/destination pipeline stages and source/destination memory
/// access types. The pipeline stages create an *execution dependency*: the `src_stages` of
/// commands submitted before the barrier must be completely finished before any of the
/// `dst_stages` of commands after the barrier are allowed to start. The memory access types
/// create a *memory dependency*: in addition to the execution dependency, any `src_access`
/// performed before the barrier must be made available and visible before any `dst_access`
/// are made after the barrier.
#[derive(Clone, Debug)]
pub struct DependencyInfo {
    /// Flags to modify how the execution and memory dependencies are formed.
    ///
    /// The default value is empty.
    pub dependency_flags: DependencyFlags,

    /// Memory barriers for global operations and accesses, not limited to a single resource.
    ///
    /// The default value is empty.
    pub memory_barriers: SmallVec<[MemoryBarrier; 2]>,

    /// Memory barriers for individual buffers.
    ///
    /// The default value is empty.
    pub buffer_memory_barriers: SmallVec<[BufferMemoryBarrier; 8]>,

    /// Memory barriers for individual images.
    ///
    /// The default value is empty.
    pub image_memory_barriers: SmallVec<[ImageMemoryBarrier; 8]>,

    pub _ne: crate::NonExhaustive,
}
2239 
2240 impl DependencyInfo {
2241     /// Returns whether `self` contains any barriers.
2242     #[inline]
is_empty(&self) -> bool2243     pub fn is_empty(&self) -> bool {
2244         self.memory_barriers.is_empty()
2245             && self.buffer_memory_barriers.is_empty()
2246             && self.image_memory_barriers.is_empty()
2247     }
2248 
2249     /// Clears all barriers.
2250     #[inline]
clear(&mut self)2251     pub fn clear(&mut self) {
2252         self.memory_barriers.clear();
2253         self.buffer_memory_barriers.clear();
2254         self.image_memory_barriers.clear();
2255     }
2256 }
2257 
impl Default for DependencyInfo {
    /// Returns a `DependencyInfo` with empty flags and no barriers of any kind.
    #[inline]
    fn default() -> Self {
        Self {
            dependency_flags: DependencyFlags::empty(),
            memory_barriers: SmallVec::new(),
            buffer_memory_barriers: SmallVec::new(),
            image_memory_barriers: SmallVec::new(),
            _ne: crate::NonExhaustive(()),
        }
    }
}
2270 
vulkan_bitflags! {
    #[non_exhaustive]

    /// Flags that modify how execution and memory dependencies are formed.
    DependencyFlags = DependencyFlags(u32);

    /// For framebuffer-space pipeline stages, specifies that the dependency is framebuffer-local.
    /// The implementation can start the destination operation for some given pixels as long as the
    /// source operation is finished for these given pixels.
    ///
    /// Framebuffer-local dependencies are usually more efficient, especially on tile-based
    /// architectures.
    BY_REGION = BY_REGION,

    /// For devices that consist of multiple physical devices, specifies that the dependency is
    /// device-local. The dependency will only apply to the operations on each physical device
    /// individually, rather than applying to all physical devices as a whole. This allows each
    /// physical device to operate independently of the others.
    ///
    /// The device API version must be at least 1.1, or the [`khr_device_group`] extension must be
    /// enabled on the device.
    ///
    /// [`khr_device_group`]: crate::device::DeviceExtensions::khr_device_group
    DEVICE_GROUP = DEVICE_GROUP {
        api_version: V1_1,
        device_extensions: [khr_device_group],
    },


    /// For subpass dependencies, and pipeline barriers executing within a render pass instance,
    /// if the render pass uses multiview rendering, specifies that the dependency is view-local.
    /// Each view in the destination subpass will only depend on a single view in the source
    /// subpass, instead of all views.
    ///
    /// The device API version must be at least 1.1, or the [`khr_multiview`] extension must be
    /// enabled on the device.
    ///
    /// [`khr_multiview`]: crate::device::DeviceExtensions::khr_multiview
    VIEW_LOCAL = VIEW_LOCAL {
        api_version: V1_1,
        device_extensions: [khr_multiview],
    },
}
2314 
/// A memory barrier that is applied globally.
#[derive(Clone, Debug)]
pub struct MemoryBarrier {
    /// The pipeline stages in the source scope to wait for.
    ///
    /// The default value is [`PipelineStages::empty()`].
    pub src_stages: PipelineStages,

    /// The memory accesses in the source scope to make available and visible.
    ///
    /// The default value is [`AccessFlags::empty()`].
    pub src_access: AccessFlags,

    /// The pipeline stages in the destination scope that must wait for `src_stages`.
    ///
    /// The default value is [`PipelineStages::empty()`].
    pub dst_stages: PipelineStages,

    /// The memory accesses in the destination scope that must wait for `src_access` to be made
    /// available and visible.
    ///
    /// The default value is [`AccessFlags::empty()`].
    pub dst_access: AccessFlags,

    pub _ne: crate::NonExhaustive,
}
2339 
impl Default for MemoryBarrier {
    /// Returns a `MemoryBarrier` with empty stages and accesses on both sides,
    /// i.e. a barrier that creates no dependency until its fields are filled in.
    #[inline]
    fn default() -> Self {
        Self {
            src_stages: PipelineStages::empty(),
            src_access: AccessFlags::empty(),
            dst_stages: PipelineStages::empty(),
            dst_access: AccessFlags::empty(),
            _ne: crate::NonExhaustive(()),
        }
    }
}
2352 
/// A memory barrier that is applied to a single buffer.
#[derive(Clone, Debug)]
pub struct BufferMemoryBarrier {
    /// The pipeline stages in the source scope to wait for.
    ///
    /// The default value is [`PipelineStages::empty()`].
    pub src_stages: PipelineStages,

    /// The memory accesses in the source scope to make available and visible.
    ///
    /// The default value is [`AccessFlags::empty()`].
    pub src_access: AccessFlags,

    /// The pipeline stages in the destination scope that must wait for `src_stages`.
    ///
    /// The default value is [`PipelineStages::empty()`].
    pub dst_stages: PipelineStages,

    /// The memory accesses in the destination scope that must wait for `src_access` to be made
    /// available and visible.
    ///
    /// The default value is [`AccessFlags::empty()`].
    pub dst_access: AccessFlags,

    /// For resources created with [`Sharing::Exclusive`](crate::sync::Sharing), transfers
    /// ownership of a resource from one queue family to another.
    ///
    /// The default value is `None`.
    pub queue_family_ownership_transfer: Option<QueueFamilyOwnershipTransfer>,

    /// The buffer to apply the barrier to.
    pub buffer: Arc<Buffer>,

    /// The byte range of `buffer` to apply the barrier to.
    ///
    /// [`BufferMemoryBarrier::buffer`] initializes this to the empty range `0..0`.
    pub range: Range<DeviceSize>,

    pub _ne: crate::NonExhaustive,
}
2387 
impl BufferMemoryBarrier {
    /// Returns a `BufferMemoryBarrier` for the given `buffer`, with every other field set to
    /// its default: empty stages and accesses on both sides, no queue family ownership
    /// transfer, and an empty `0..0` byte range.
    #[inline]
    pub fn buffer(buffer: Arc<Buffer>) -> Self {
        Self {
            src_stages: PipelineStages::empty(),
            src_access: AccessFlags::empty(),
            dst_stages: PipelineStages::empty(),
            dst_access: AccessFlags::empty(),
            queue_family_ownership_transfer: None,
            buffer,
            // Empty placeholder range; the caller is expected to set the real range.
            range: 0..0,
            _ne: crate::NonExhaustive(()),
        }
    }
}
2403 
/// A memory barrier that is applied to a single image.
#[derive(Clone, Debug)]
pub struct ImageMemoryBarrier {
    /// The pipeline stages in the source scope to wait for.
    ///
    /// The default value is [`PipelineStages::empty()`].
    pub src_stages: PipelineStages,

    /// The memory accesses in the source scope to make available and visible.
    ///
    /// The default value is [`AccessFlags::empty()`].
    pub src_access: AccessFlags,

    /// The pipeline stages in the destination scope that must wait for `src_stages`.
    ///
    /// The default value is [`PipelineStages::empty()`].
    pub dst_stages: PipelineStages,

    /// The memory accesses in the destination scope that must wait for `src_access` to be made
    /// available and visible.
    ///
    /// The default value is [`AccessFlags::empty()`].
    pub dst_access: AccessFlags,

    /// The layout that the specified `subresource_range` of `image` is expected to be in when the
    /// source scope completes.
    ///
    /// The default value is [`ImageLayout::Undefined`].
    pub old_layout: ImageLayout,

    /// The layout that the specified `subresource_range` of `image` will be transitioned to before
    /// the destination scope begins.
    ///
    /// The default value is [`ImageLayout::Undefined`].
    pub new_layout: ImageLayout,

    /// For resources created with [`Sharing::Exclusive`](crate::sync::Sharing), transfers
    /// ownership of a resource from one queue family to another.
    ///
    /// The default value is `None`.
    pub queue_family_ownership_transfer: Option<QueueFamilyOwnershipTransfer>,

    /// The image to apply the barrier to.
    pub image: Arc<Image>,

    /// The subresource range of `image` to apply the barrier to.
    ///
    /// [`ImageMemoryBarrier::image`] initializes this to an empty range with no aspects.
    pub subresource_range: ImageSubresourceRange,

    pub _ne: crate::NonExhaustive,
}
2446 
impl ImageMemoryBarrier {
    /// Returns an `ImageMemoryBarrier` for the given `image`, with every other field set to
    /// its default: empty stages and accesses on both sides, `Undefined` layouts, no queue
    /// family ownership transfer, and an empty subresource range.
    #[inline]
    pub fn image(image: Arc<Image>) -> Self {
        Self {
            src_stages: PipelineStages::empty(),
            src_access: AccessFlags::empty(),
            dst_stages: PipelineStages::empty(),
            dst_access: AccessFlags::empty(),
            old_layout: ImageLayout::Undefined,
            new_layout: ImageLayout::Undefined,
            queue_family_ownership_transfer: None,
            image,
            // Empty placeholder range; the caller is expected to set the real subresources.
            subresource_range: ImageSubresourceRange {
                aspects: ImageAspects::empty(), // Can't use image format aspects because `color` can't be specified with `planeN`.
                mip_levels: 0..0,
                array_layers: 0..0,
            },
            _ne: crate::NonExhaustive(()),
        }
    }
}
2468 
/// Specifies a queue family ownership transfer for a resource.
///
/// There are three classes of queues that can be used in an ownership transfer:
/// - A **local** queue exists on the current [`Instance`] and [`Device`].
/// - An **external** queue does not exist on the current [`Instance`], but has the same
///   [`device_uuid`] and [`driver_uuid`] as the current [`Device`].
/// - A **foreign** queue can be an external queue, or any queue on another device for which the
///   mentioned parameters do not match.
///
/// The `Exclusive*` variants carry the concrete local queue family index involved, while the
/// `Concurrent*` variants do not need one. Which classes of queue are involved determines the
/// API version or device extensions required, as documented on each variant.
///
/// [`Instance`]: crate::instance::Instance
/// [`Device`]: crate::device::Device
/// [`device_uuid`]: crate::device::Properties::device_uuid
/// [`driver_uuid`]: crate::device::Properties::driver_uuid
#[derive(Clone, Copy, Debug)]
pub enum QueueFamilyOwnershipTransfer {
    /// For a resource with [`Sharing::Exclusive`], transfers ownership between two local queues.
    ///
    /// [`Sharing::Exclusive`]: crate::sync::Sharing::Exclusive
    ExclusiveBetweenLocal {
        /// The queue family that currently owns the resource.
        src_index: u32,

        /// The queue family to transfer ownership to.
        dst_index: u32,
    },

    /// For a resource with [`Sharing::Exclusive`], transfers ownership from a local queue to an
    /// external queue.
    ///
    /// The device API version must be at least 1.1, or the [`khr_external_memory`] extension must
    /// be enabled on the device.
    ///
    /// [`Sharing::Exclusive`]: crate::sync::Sharing::Exclusive
    /// [`khr_external_memory`]: crate::device::DeviceExtensions::khr_external_memory
    ExclusiveToExternal {
        /// The queue family that currently owns the resource.
        src_index: u32,
    },

    /// For a resource with [`Sharing::Exclusive`], transfers ownership from an external queue to a
    /// local queue.
    ///
    /// The device API version must be at least 1.1, or the [`khr_external_memory`] extension must
    /// be enabled on the device.
    ///
    /// [`Sharing::Exclusive`]: crate::sync::Sharing::Exclusive
    /// [`khr_external_memory`]: crate::device::DeviceExtensions::khr_external_memory
    ExclusiveFromExternal {
        /// The queue family to transfer ownership to.
        dst_index: u32,
    },

    /// For a resource with [`Sharing::Exclusive`], transfers ownership from a local queue to a
    /// foreign queue.
    ///
    /// The [`ext_queue_family_foreign`] extension must be enabled on the device.
    ///
    /// [`Sharing::Exclusive`]: crate::sync::Sharing::Exclusive
    /// [`ext_queue_family_foreign`]: crate::device::DeviceExtensions::ext_queue_family_foreign
    ExclusiveToForeign {
        /// The queue family that currently owns the resource.
        src_index: u32,
    },

    /// For a resource with [`Sharing::Exclusive`], transfers ownership from a foreign queue to a
    /// local queue.
    ///
    /// The [`ext_queue_family_foreign`] extension must be enabled on the device.
    ///
    /// [`Sharing::Exclusive`]: crate::sync::Sharing::Exclusive
    /// [`ext_queue_family_foreign`]: crate::device::DeviceExtensions::ext_queue_family_foreign
    ExclusiveFromForeign {
        /// The queue family to transfer ownership to.
        dst_index: u32,
    },

    /// For a resource with [`Sharing::Concurrent`], transfers ownership from its local queues to
    /// an external queue.
    ///
    /// The device API version must be at least 1.1, or the [`khr_external_memory`] extension must
    /// be enabled on the device.
    ///
    /// [`Sharing::Concurrent`]: crate::sync::Sharing::Concurrent
    /// [`khr_external_memory`]: crate::device::DeviceExtensions::khr_external_memory
    ConcurrentToExternal,

    /// For a resource with [`Sharing::Concurrent`], transfers ownership from an external queue to
    /// its local queues.
    ///
    /// The device API version must be at least 1.1, or the [`khr_external_memory`] extension must
    /// be enabled on the device.
    ///
    /// [`Sharing::Concurrent`]: crate::sync::Sharing::Concurrent
    /// [`khr_external_memory`]: crate::device::DeviceExtensions::khr_external_memory
    ConcurrentFromExternal,

    /// For a resource with [`Sharing::Concurrent`], transfers ownership from its local queues to
    /// a foreign queue.
    ///
    /// The [`ext_queue_family_foreign`] extension must be enabled on the device.
    ///
    /// [`Sharing::Concurrent`]: crate::sync::Sharing::Concurrent
    /// [`ext_queue_family_foreign`]: crate::device::DeviceExtensions::ext_queue_family_foreign
    ConcurrentToForeign,

    /// For a resource with [`Sharing::Concurrent`], transfers ownership from a foreign queue to
    /// its local queues.
    ///
    /// The [`ext_queue_family_foreign`] extension must be enabled on the device.
    ///
    /// [`Sharing::Concurrent`]: crate::sync::Sharing::Concurrent
    /// [`ext_queue_family_foreign`]: crate::device::DeviceExtensions::ext_queue_family_foreign
    ConcurrentFromForeign,
}
2583 
2584 impl QueueFamilyOwnershipTransfer {
validate_device(self, device: &Device) -> Result<(), RequirementNotMet>2585     pub(crate) fn validate_device(self, device: &Device) -> Result<(), RequirementNotMet> {
2586         match self {
2587             QueueFamilyOwnershipTransfer::ExclusiveToExternal { .. } => {
2588                 if !(device.api_version() >= Version::V1_1
2589                     || device.enabled_extensions().khr_external_memory)
2590                 {
2591                     return Err(crate::RequirementNotMet {
2592                         required_for: "`QueueFamilyOwnershipTransfer::ExclusiveToExternal",
2593                         requires_one_of: crate::RequiresOneOf {
2594                             api_version: Some(Version::V1_1),
2595                             device_extensions: &["khr_external_memory"],
2596                             ..Default::default()
2597                         },
2598                     });
2599                 }
2600             }
2601             QueueFamilyOwnershipTransfer::ExclusiveFromExternal { .. } => {
2602                 if !(device.api_version() >= Version::V1_1
2603                     || device.enabled_extensions().khr_external_memory)
2604                 {
2605                     return Err(crate::RequirementNotMet {
2606                         required_for: "`QueueFamilyOwnershipTransfer::ExclusiveFromExternal",
2607                         requires_one_of: crate::RequiresOneOf {
2608                             api_version: Some(Version::V1_1),
2609                             device_extensions: &["khr_external_memory"],
2610                             ..Default::default()
2611                         },
2612                     });
2613                 }
2614             }
2615             QueueFamilyOwnershipTransfer::ExclusiveToForeign { .. } => {
2616                 if !device.enabled_extensions().ext_queue_family_foreign {
2617                     return Err(crate::RequirementNotMet {
2618                         required_for: "`QueueFamilyOwnershipTransfer::ExclusiveToForeign",
2619                         requires_one_of: crate::RequiresOneOf {
2620                             device_extensions: &["ext_queue_family_foreign"],
2621                             ..Default::default()
2622                         },
2623                     });
2624                 }
2625             }
2626             QueueFamilyOwnershipTransfer::ExclusiveFromForeign { .. } => {
2627                 if !device.enabled_extensions().ext_queue_family_foreign {
2628                     return Err(crate::RequirementNotMet {
2629                         required_for: "`QueueFamilyOwnershipTransfer::ExclusiveFromForeign",
2630                         requires_one_of: crate::RequiresOneOf {
2631                             device_extensions: &["ext_queue_family_foreign"],
2632                             ..Default::default()
2633                         },
2634                     });
2635                 }
2636             }
2637             QueueFamilyOwnershipTransfer::ConcurrentToExternal => {
2638                 if !(device.api_version() >= Version::V1_1
2639                     || device.enabled_extensions().khr_external_memory)
2640                 {
2641                     return Err(crate::RequirementNotMet {
2642                         required_for: "`QueueFamilyOwnershipTransfer::ConcurrentToExternal",
2643                         requires_one_of: crate::RequiresOneOf {
2644                             api_version: Some(Version::V1_1),
2645                             device_extensions: &["khr_external_memory"],
2646                             ..Default::default()
2647                         },
2648                     });
2649                 }
2650             }
2651             QueueFamilyOwnershipTransfer::ConcurrentFromExternal => {
2652                 if !(device.api_version() >= Version::V1_1
2653                     || device.enabled_extensions().khr_external_memory)
2654                 {
2655                     return Err(crate::RequirementNotMet {
2656                         required_for: "`QueueFamilyOwnershipTransfer::ConcurrentFromExternal",
2657                         requires_one_of: crate::RequiresOneOf {
2658                             api_version: Some(Version::V1_1),
2659                             device_extensions: &["khr_external_memory"],
2660                             ..Default::default()
2661                         },
2662                     });
2663                 }
2664             }
2665             QueueFamilyOwnershipTransfer::ConcurrentToForeign => {
2666                 if !device.enabled_extensions().ext_queue_family_foreign {
2667                     return Err(crate::RequirementNotMet {
2668                         required_for: "`QueueFamilyOwnershipTransfer::ConcurrentToForeign",
2669                         requires_one_of: crate::RequiresOneOf {
2670                             device_extensions: &["ext_queue_family_foreign"],
2671                             ..Default::default()
2672                         },
2673                     });
2674                 }
2675             }
2676             QueueFamilyOwnershipTransfer::ConcurrentFromForeign => {
2677                 if !device.enabled_extensions().ext_queue_family_foreign {
2678                     return Err(crate::RequirementNotMet {
2679                         required_for: "`QueueFamilyOwnershipTransfer::ConcurrentFromForeign",
2680                         requires_one_of: crate::RequiresOneOf {
2681                             device_extensions: &["ext_queue_family_foreign"],
2682                             ..Default::default()
2683                         },
2684                     });
2685                 }
2686             }
2687             _ => (),
2688         }
2689 
2690         Ok(())
2691     }
2692 }
2693 
2694 impl From<QueueFamilyOwnershipTransfer> for (u32, u32) {
from(val: QueueFamilyOwnershipTransfer) -> Self2695     fn from(val: QueueFamilyOwnershipTransfer) -> Self {
2696         match val {
2697             QueueFamilyOwnershipTransfer::ExclusiveBetweenLocal {
2698                 src_index,
2699                 dst_index,
2700             } => (src_index, dst_index),
2701             QueueFamilyOwnershipTransfer::ExclusiveToExternal { src_index } => {
2702                 (src_index, ash::vk::QUEUE_FAMILY_EXTERNAL)
2703             }
2704             QueueFamilyOwnershipTransfer::ExclusiveFromExternal { dst_index } => {
2705                 (ash::vk::QUEUE_FAMILY_EXTERNAL, dst_index)
2706             }
2707             QueueFamilyOwnershipTransfer::ExclusiveToForeign { src_index } => {
2708                 (src_index, ash::vk::QUEUE_FAMILY_FOREIGN_EXT)
2709             }
2710             QueueFamilyOwnershipTransfer::ExclusiveFromForeign { dst_index } => {
2711                 (ash::vk::QUEUE_FAMILY_FOREIGN_EXT, dst_index)
2712             }
2713             QueueFamilyOwnershipTransfer::ConcurrentToExternal => (
2714                 ash::vk::QUEUE_FAMILY_IGNORED,
2715                 ash::vk::QUEUE_FAMILY_EXTERNAL,
2716             ),
2717             QueueFamilyOwnershipTransfer::ConcurrentFromExternal => (
2718                 ash::vk::QUEUE_FAMILY_EXTERNAL,
2719                 ash::vk::QUEUE_FAMILY_IGNORED,
2720             ),
2721             QueueFamilyOwnershipTransfer::ConcurrentToForeign => (
2722                 ash::vk::QUEUE_FAMILY_IGNORED,
2723                 ash::vk::QUEUE_FAMILY_FOREIGN_EXT,
2724             ),
2725             QueueFamilyOwnershipTransfer::ConcurrentFromForeign => (
2726                 ash::vk::QUEUE_FAMILY_FOREIGN_EXT,
2727                 ash::vk::QUEUE_FAMILY_IGNORED,
2728             ),
2729         }
2730     }
2731 }
2732