// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.

//! Device memory allocation and memory pools.
//!
//! By default, memory allocation is handled automatically by the vulkano library when you create
//! a buffer or an image. But if you want more control, you can customize the memory allocation
//! strategy.
//!
//! # Memory types and heaps
//!
//! A physical device is composed of one or more **memory heaps**. A memory heap is a pool of
//! memory that can be allocated.
//!
//! ```
//! // Enumerating memory heaps.
//! # let physical_device: vulkano::device::physical::PhysicalDevice = return;
//! for (index, heap) in physical_device.memory_properties().memory_heaps.iter().enumerate() {
//!     println!("Heap #{:?} has a capacity of {:?} bytes", index, heap.size);
//! }
//! ```
//!
//! However, you can't allocate directly from a memory heap. A memory heap is shared among one or
//! more **memory types**, which you can allocate memory from. Each memory type has different
//! characteristics.
//!
//! A memory type may or may not be visible to the host. In other words, it may or may not be
//! directly writable by the CPU. A memory type may or may not be device-local. A device-local
//! memory type has much quicker access times from the GPU than a non-device-local type. Note
//! that non-device-local memory types are still accessible by the device; they are just slower.
//!
//! ```
//! // Enumerating memory types.
//! # let physical_device: vulkano::device::physical::PhysicalDevice = return;
//! for ty in physical_device.memory_properties().memory_types.iter() {
//!     println!("Memory type belongs to heap #{:?}", ty.heap_index);
//!     println!("Property flags: {:?}", ty.property_flags);
//! }
//! ```
//!
//! Memory types are ordered from "best" to "worst". In other words, the implementation prefers
//! that you use the memory types that come earlier in the list. This means that selecting a
//! memory type should always be done by enumerating them and taking the first one that matches
//! your criteria.
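//!
//! For example, a minimal sketch of that selection, here taking the first memory type that is
//! host-visible (the `intersects` helper and the flag constant are assumed to be available on
//! `MemoryPropertyFlags`):
//!
//! ```
//! use vulkano::memory::MemoryPropertyFlags;
//!
//! # let physical_device: vulkano::device::physical::PhysicalDevice = return;
//! let memory_type_index = physical_device
//!     .memory_properties()
//!     .memory_types
//!     .iter()
//!     .position(|ty| ty.property_flags.intersects(MemoryPropertyFlags::HOST_VISIBLE))
//!     .expect("no host-visible memory type found");
//! ```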
//!
//! ## In practice
//!
//! In practice, desktop machines usually have two memory heaps: one that represents the RAM of
//! the CPU, and one that represents the RAM of the GPU. The CPU's RAM is host-accessible but not
//! device-local, while the GPU's RAM is not host-accessible but is device-local.
//!
//! Mobile machines usually have a single memory heap that is "equally local" to both the CPU and
//! the GPU. It is both host-accessible and device-local.
//!
//! # Allocating memory and memory pools
//!
//! Allocating memory can be done by calling `DeviceMemory::allocate()`.
//!
//! Here is an example:
//!
//! ```
//! use vulkano::memory::{DeviceMemory, MemoryAllocateInfo};
//!
//! # let device: std::sync::Arc<vulkano::device::Device> = return;
//! // Taking the first memory type for the sake of this example.
//! let memory_type_index = 0;
//!
//! let memory = DeviceMemory::allocate(
//!     device.clone(),
//!     MemoryAllocateInfo {
//!         allocation_size: 1024,
//!         memory_type_index,
//!         ..Default::default()
//!     },
//! ).expect("Failed to allocate memory");
//!
//! // The memory is automatically freed when `memory` is destroyed.
//! ```
//!
//! However, allocating and freeing memory is very slow (up to several hundred milliseconds
//! sometimes). Instead, you are strongly encouraged to use a memory pool. A memory pool is not
//! a Vulkan concept but a vulkano concept.
//!
//! A memory pool is any object that implements the `MemoryPool` trait. You can implement that
//! trait on your own structure and then use it when you create buffers and images, so that they
//! get their memory from that pool. By default, if you don't specify any pool when creating a
//! buffer or an image, an instance of `StandardMemoryPool` that is shared by the `Device` object
//! is used.
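//!
//! For instance, a minimal sketch of setting up one reusable allocator for a device (assuming
//! the `StandardMemoryAllocator` type of the [`allocator`] module is available):
//!
//! ```
//! use vulkano::memory::allocator::StandardMemoryAllocator;
//!
//! # let device: std::sync::Arc<vulkano::device::Device> = return;
//! // Create the allocator once, then reuse it for all buffer and image allocations.
//! let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
//! ```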

pub use self::alignment::DeviceAlignment;
use self::allocator::DeviceLayout;
pub use self::device_memory::{
    DeviceMemory, DeviceMemoryError, ExternalMemoryHandleType, ExternalMemoryHandleTypes,
    MappedDeviceMemory, MemoryAllocateFlags, MemoryAllocateInfo, MemoryImportInfo, MemoryMapError,
};
use crate::{
    buffer::{sys::RawBuffer, Subbuffer},
    image::{sys::RawImage, ImageAccess, ImageAspects},
    macros::vulkan_bitflags,
    sync::semaphore::Semaphore,
    DeviceSize,
};
use std::{
    num::NonZeroU64,
    ops::{Bound, Range, RangeBounds, RangeTo},
    sync::Arc,
};

mod alignment;
pub mod allocator;
mod device_memory;
/// Properties of the memory in a physical device.
#[derive(Clone, Debug)]
#[non_exhaustive]
pub struct MemoryProperties {
    /// The available memory types.
    pub memory_types: Vec<MemoryType>,

    /// The available memory heaps.
    pub memory_heaps: Vec<MemoryHeap>,
}

impl From<ash::vk::PhysicalDeviceMemoryProperties> for MemoryProperties {
    #[inline]
    fn from(val: ash::vk::PhysicalDeviceMemoryProperties) -> Self {
        Self {
            memory_types: val.memory_types[0..val.memory_type_count as usize]
                .iter()
                .map(|vk_memory_type| MemoryType {
                    property_flags: vk_memory_type.property_flags.into(),
                    heap_index: vk_memory_type.heap_index,
                })
                .collect(),
            memory_heaps: val.memory_heaps[0..val.memory_heap_count as usize]
                .iter()
                .map(|vk_memory_heap| MemoryHeap {
                    size: vk_memory_heap.size,
                    flags: vk_memory_heap.flags.into(),
                })
                .collect(),
        }
    }
}

/// A memory type in a physical device.
#[derive(Clone, Debug)]
#[non_exhaustive]
pub struct MemoryType {
    /// The properties of this memory type.
    pub property_flags: MemoryPropertyFlags,

    /// The index of the memory heap that this memory type corresponds to.
    pub heap_index: u32,
}
vulkan_bitflags! {
    #[non_exhaustive]

    /// Properties of a memory type.
    MemoryPropertyFlags = MemoryPropertyFlags(u32);

    /// The memory is located on the device, and is allocated from a heap that also has the
    /// [`DEVICE_LOCAL`] flag set.
    ///
    /// For some devices, particularly integrated GPUs, the device shares memory with the host
    /// and all memory may be device-local, so the distinction is moot. However, if the device
    /// has non-device-local memory, it is usually faster for the device to access device-local
    /// memory. Therefore, device-local memory is preferred for data that will only be accessed
    /// by the device.
    ///
    /// If the device and host do not share memory, data transfer between host and device may
    /// involve sending the data over the data bus that connects the two. Accesses are faster if
    /// they do not have to cross this barrier: device-local memory is fast for the device to
    /// access, but slower to access by the host. However, there are devices that share memory
    /// with the host, yet have distinct device-local and non-device-local memory types. In that
    /// case, the speed difference may not be large.
    ///
    /// For data transfer between host and device, it is most efficient if the memory is located
    /// at the destination of the transfer. Thus, if [`HOST_VISIBLE`] versions of both are
    /// available, device-local memory is preferred for host-to-device data transfer, while
    /// non-device-local memory is preferred for device-to-host data transfer. This is because
    /// data is usually written only once but potentially read several times, and because reads
    /// can take advantage of caching while writes cannot.
    ///
    /// Devices may have memory types that are neither `DEVICE_LOCAL` nor [`HOST_VISIBLE`]. This
    /// is regular host memory that is made available to the device exclusively. Although it
    /// will be slower to access from the device than `DEVICE_LOCAL` memory, it can be faster
    /// than [`HOST_VISIBLE`] memory. It can be used as overflow space if the device is out of
    /// memory.
    ///
    /// [`DEVICE_LOCAL`]: MemoryHeapFlags::DEVICE_LOCAL
    /// [`HOST_VISIBLE`]: MemoryPropertyFlags::HOST_VISIBLE
    DEVICE_LOCAL = DEVICE_LOCAL,

    /// The memory can be mapped into the memory space of the host and accessed as regular RAM.
    ///
    /// Memory of this type is required to transfer data between the host and the device. If
    /// the memory is going to be accessed by the device more than a few times, it is recommended
    /// to copy the data to non-`HOST_VISIBLE` memory first if it is available.
    ///
    /// `HOST_VISIBLE` memory is always at least one of [`HOST_COHERENT`] or [`HOST_CACHED`],
    /// and it can be both.
    ///
    /// [`HOST_COHERENT`]: MemoryPropertyFlags::HOST_COHERENT
    /// [`HOST_CACHED`]: MemoryPropertyFlags::HOST_CACHED
    HOST_VISIBLE = HOST_VISIBLE,

    /// Host access to the memory does not require calling [`invalidate_range`] to make device
    /// writes visible to the host, nor [`flush_range`] to flush host writes back to the device.
    ///
    /// [`invalidate_range`]: MappedDeviceMemory::invalidate_range
    /// [`flush_range`]: MappedDeviceMemory::flush_range
    HOST_COHERENT = HOST_COHERENT,

    /// The memory is cached by the host.
    ///
    /// `HOST_CACHED` memory is fast for reads and random access from the host, so it is
    /// preferred for device-to-host data transfer. Memory that is [`HOST_VISIBLE`] but not
    /// `HOST_CACHED` is often slow for all accesses other than sequential writing, so it is
    /// better suited for host-to-device transfer, and it is often beneficial to write the data
    /// in sequence.
    ///
    /// [`HOST_VISIBLE`]: MemoryPropertyFlags::HOST_VISIBLE
    HOST_CACHED = HOST_CACHED,

    /// Allocations made from the memory are lazy.
    ///
    /// This means that no actual allocation is performed. Instead, memory is automatically
    /// allocated by the Vulkan implementation based on need. You can call
    /// [`DeviceMemory::commitment`] to query how much memory is currently committed to an
    /// allocation.
    ///
    /// Memory of this type can only be used on images created with the transient attachment
    /// usage flag, and is never [`HOST_VISIBLE`].
    ///
    /// [`HOST_VISIBLE`]: MemoryPropertyFlags::HOST_VISIBLE
    LAZILY_ALLOCATED = LAZILY_ALLOCATED,

    /// The memory can only be accessed by the device, and allows protected queue access.
    ///
    /// Memory of this type is never [`HOST_VISIBLE`], [`HOST_COHERENT`] or [`HOST_CACHED`].
    ///
    /// [`HOST_VISIBLE`]: MemoryPropertyFlags::HOST_VISIBLE
    /// [`HOST_COHERENT`]: MemoryPropertyFlags::HOST_COHERENT
    /// [`HOST_CACHED`]: MemoryPropertyFlags::HOST_CACHED
    PROTECTED = PROTECTED {
        api_version: V1_1,
    },

    /// Device accesses to the memory are automatically made available and visible to other
    /// device accesses.
    ///
    /// Memory of this type is slower to access by the device, so it is best avoided for general
    /// purpose use. Because of its coherence properties, however, it may be useful for
    /// debugging.
    DEVICE_COHERENT = DEVICE_COHERENT_AMD {
        device_extensions: [amd_device_coherent_memory],
    },

    /// The memory is not cached on the device.
    ///
    /// `DEVICE_UNCACHED` memory is always also [`DEVICE_COHERENT`].
    ///
    /// [`DEVICE_COHERENT`]: MemoryPropertyFlags::DEVICE_COHERENT
    DEVICE_UNCACHED = DEVICE_UNCACHED_AMD {
        device_extensions: [amd_device_coherent_memory],
    },

    /// Other devices can access the memory via remote direct memory access (RDMA).
    RDMA_CAPABLE = RDMA_CAPABLE_NV {
        device_extensions: [nv_external_memory_rdma],
    },
}

/// A memory heap in a physical device.
#[derive(Clone, Debug)]
#[non_exhaustive]
pub struct MemoryHeap {
    /// The size of the heap in bytes.
    pub size: DeviceSize,

    /// Attributes of the heap.
    pub flags: MemoryHeapFlags,
}

vulkan_bitflags! {
    #[non_exhaustive]

    /// Attributes of a memory heap.
    MemoryHeapFlags = MemoryHeapFlags(u32);

    /// The heap corresponds to device-local memory.
    DEVICE_LOCAL = DEVICE_LOCAL,

    /// If used on a logical device that represents more than one physical device, allocations
    /// are replicated across each physical device's instance of this heap.
    MULTI_INSTANCE = MULTI_INSTANCE {
        api_version: V1_1,
        instance_extensions: [khr_device_group_creation],
    },
}

/// Represents requirements expressed by the Vulkan implementation when it comes to binding
/// memory to a resource.
#[derive(Clone, Copy, Debug)]
pub struct MemoryRequirements {
    /// Memory layout required for the resource.
    pub layout: DeviceLayout,

    /// Indicates which memory types can be used. Each bit that is set to 1 means that the memory
    /// type whose index is the same as the position of the bit can be used.
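    ///
    /// For example, a minimal sketch of testing whether the memory type at a given index is
    /// allowed (the values are illustrative):
    ///
    /// ```
    /// # let memory_type_bits: u32 = 0b101;
    /// # let memory_type_index = 2;
    /// let is_supported = memory_type_bits & (1 << memory_type_index) != 0;
    /// assert!(is_supported);
    /// ```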
    pub memory_type_bits: u32,

    /// Whether the implementation prefers to use dedicated allocations (in other words, allocate
    /// a whole block of memory dedicated to this resource alone).
    /// This will be `false` if the device API version is less than 1.1 and the
    /// [`khr_get_memory_requirements2`](crate::device::DeviceExtensions::khr_get_memory_requirements2)
    /// extension is not enabled on the device.
    pub prefers_dedicated_allocation: bool,

    /// Whether the implementation requires the use of a dedicated allocation (in other words,
    /// allocate a whole block of memory dedicated to this resource alone).
    /// This will be `false` if the device API version is less than 1.1 and the
    /// [`khr_get_memory_requirements2`](crate::device::DeviceExtensions::khr_get_memory_requirements2)
    /// extension is not enabled on the device.
    pub requires_dedicated_allocation: bool,
}

/// Indicates a specific resource to allocate memory for.
///
/// Using dedicated allocations can yield better performance, but requires the
/// [`khr_dedicated_allocation`](crate::device::DeviceExtensions::khr_dedicated_allocation)
/// extension to be enabled on the device.
///
/// If a dedicated allocation is performed, it must not be bound to any resource other than the
/// one that was passed in the enum variant.
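///
/// A minimal sketch of requesting a dedicated allocation for a buffer; the hidden line stands in
/// for a real `RawBuffer`:
///
/// ```
/// use vulkano::memory::DedicatedAllocation;
///
/// # let raw_buffer: vulkano::buffer::sys::RawBuffer = return;
/// let dedicated = DedicatedAllocation::Buffer(&raw_buffer);
/// ```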
#[derive(Clone, Copy, Debug)]
pub enum DedicatedAllocation<'a> {
    /// Allocation dedicated to a buffer.
    Buffer(&'a RawBuffer),
    /// Allocation dedicated to an image.
    Image(&'a RawImage),
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) enum DedicatedTo {
    Buffer(NonZeroU64),
    Image(NonZeroU64),
}

impl From<DedicatedAllocation<'_>> for DedicatedTo {
    fn from(dedicated_allocation: DedicatedAllocation<'_>) -> Self {
        match dedicated_allocation {
            DedicatedAllocation::Buffer(buffer) => Self::Buffer(buffer.id()),
            DedicatedAllocation::Image(image) => Self::Image(image.id()),
        }
    }
}

/// The properties for exporting or importing external memory, when a buffer or image is created
/// with a specific configuration.
#[derive(Clone, Debug, Default)]
#[non_exhaustive]
pub struct ExternalMemoryProperties {
    /// Whether a dedicated memory allocation is required for the queried external handle type.
    pub dedicated_only: bool,

    /// Whether memory can be exported to an external source with the queried
    /// external handle type.
    pub exportable: bool,

    /// Whether memory can be imported from an external source with the queried
    /// external handle type.
    pub importable: bool,

    /// Which external handle types can be re-exported after the queried external handle type has
    /// been imported.
    pub export_from_imported_handle_types: ExternalMemoryHandleTypes,

    /// Which external handle types can be enabled along with the queried external handle type
    /// when creating the buffer or image.
    pub compatible_handle_types: ExternalMemoryHandleTypes,
}

impl From<ash::vk::ExternalMemoryProperties> for ExternalMemoryProperties {
    #[inline]
    fn from(val: ash::vk::ExternalMemoryProperties) -> Self {
        Self {
            dedicated_only: val
                .external_memory_features
                .intersects(ash::vk::ExternalMemoryFeatureFlags::DEDICATED_ONLY),
            exportable: val
                .external_memory_features
                .intersects(ash::vk::ExternalMemoryFeatureFlags::EXPORTABLE),
            importable: val
                .external_memory_features
                .intersects(ash::vk::ExternalMemoryFeatureFlags::IMPORTABLE),
            export_from_imported_handle_types: val.export_from_imported_handle_types.into(),
            compatible_handle_types: val.compatible_handle_types.into(),
        }
    }
}

/// Parameters to execute sparse bind operations on a queue.
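///
/// A minimal sketch of building one of these batches; the hidden `buffer` and `bind` values
/// stand in for a real sparse buffer and its bind parameters:
///
/// ```
/// use vulkano::memory::{BindSparseInfo, SparseBufferMemoryBind};
///
/// # let buffer: vulkano::buffer::Subbuffer<[u8]> = return;
/// # let bind: SparseBufferMemoryBind = return;
/// let bind_sparse_info = BindSparseInfo {
///     buffer_binds: vec![(buffer, vec![bind])],
///     ..Default::default()
/// };
/// ```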
#[derive(Clone, Debug)]
pub struct BindSparseInfo {
    /// The semaphores to wait for before beginning the execution of this batch of
    /// sparse bind operations.
    ///
    /// The default value is empty.
    pub wait_semaphores: Vec<Arc<Semaphore>>,

    /// The bind operations to perform for buffers.
    ///
    /// The default value is empty.
    pub buffer_binds: Vec<(Subbuffer<[u8]>, Vec<SparseBufferMemoryBind>)>,

    /// The bind operations to perform for images with an opaque memory layout.
    ///
    /// This should be used for mip tail regions, the metadata aspect, and for the normal regions
    /// of images that do not have the `sparse_residency` flag set.
    ///
    /// The default value is empty.
    pub image_opaque_binds: Vec<(Arc<dyn ImageAccess>, Vec<SparseImageOpaqueMemoryBind>)>,

    /// The bind operations to perform for images with a known memory layout.
    ///
    /// This type of sparse bind can only be used for images that have the `sparse_residency`
    /// flag set. Only the normal texel regions can be bound this way, not the mip tail regions
    /// or metadata aspect.
    ///
    /// The default value is empty.
    pub image_binds: Vec<(Arc<dyn ImageAccess>, Vec<SparseImageMemoryBind>)>,

    /// The semaphores to signal after the execution of this batch of sparse bind operations
    /// has completed.
    ///
    /// The default value is empty.
    pub signal_semaphores: Vec<Arc<Semaphore>>,

    pub _ne: crate::NonExhaustive,
}

impl Default for BindSparseInfo {
    #[inline]
    fn default() -> Self {
        Self {
            wait_semaphores: Vec::new(),
            buffer_binds: Vec::new(),
            image_opaque_binds: Vec::new(),
            image_binds: Vec::new(),
            signal_semaphores: Vec::new(),
            _ne: crate::NonExhaustive(()),
        }
    }
}

/// Parameters for a single sparse bind operation on a buffer.
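///
/// A minimal sketch of a bind that maps the first 64 KiB of a buffer's sparse range to the start
/// of an existing allocation; the size and the hidden `memory` value are illustrative:
///
/// ```
/// use vulkano::memory::SparseBufferMemoryBind;
///
/// # let memory: std::sync::Arc<vulkano::memory::DeviceMemory> = return;
/// let bind = SparseBufferMemoryBind {
///     offset: 0,
///     size: 65536,
///     memory: Some((memory, 0)),
/// };
/// ```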
#[derive(Clone, Debug, Default)]
pub struct SparseBufferMemoryBind {
    /// The offset in bytes from the start of the buffer's memory, where memory is to be
    /// (un)bound.
    ///
    /// The default value is `0`.
    pub offset: DeviceSize,

    /// The size in bytes of the memory to be (un)bound.
    ///
    /// The default value is `0`, which must be overridden.
    pub size: DeviceSize,

    /// If `Some`, specifies the memory and an offset into that memory that is to be bound.
    /// The provided memory must match the buffer's memory requirements.
    ///
    /// If `None`, specifies that existing memory at the specified location is to be unbound.
    ///
    /// The default value is `None`.
    pub memory: Option<(Arc<DeviceMemory>, DeviceSize)>,
}

/// Parameters for a single sparse bind operation on parts of an image with an opaque memory
/// layout.
///
/// This type of sparse bind should be used for mip tail regions, the metadata aspect, and for
/// the normal regions of images that do not have the `sparse_residency` flag set.
#[derive(Clone, Debug, Default)]
pub struct SparseImageOpaqueMemoryBind {
    /// The offset in bytes from the start of the image's memory, where memory is to be
    /// (un)bound.
    ///
    /// The default value is `0`.
    pub offset: DeviceSize,

    /// The size in bytes of the memory to be (un)bound.
    ///
    /// The default value is `0`, which must be overridden.
    pub size: DeviceSize,

    /// If `Some`, specifies the memory and an offset into that memory that is to be bound.
    /// The provided memory must match the image's memory requirements.
    ///
    /// If `None`, specifies that existing memory at the specified location is to be unbound.
    ///
    /// The default value is `None`.
    pub memory: Option<(Arc<DeviceMemory>, DeviceSize)>,

    /// Sets whether the binding should apply to the metadata aspect of the image, or to the
    /// normal texel data.
    ///
    /// The default value is `false`.
    pub metadata: bool,
}

/// Parameters for a single sparse bind operation on parts of an image with a known memory
/// layout.
///
/// This type of sparse bind can only be used for images that have the `sparse_residency` flag
/// set. Only the normal texel regions can be bound this way, not the mip tail regions or the
/// metadata aspect.
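///
/// A minimal sketch of binding memory to a 128 × 128 color region at mip level 0; the extent and
/// the hidden `memory` value are illustrative:
///
/// ```
/// use vulkano::image::ImageAspects;
/// use vulkano::memory::SparseImageMemoryBind;
///
/// # let memory: std::sync::Arc<vulkano::memory::DeviceMemory> = return;
/// let bind = SparseImageMemoryBind {
///     aspects: ImageAspects::COLOR,
///     mip_level: 0,
///     array_layer: 0,
///     offset: [0; 3],
///     extent: [128, 128, 1],
///     memory: Some((memory, 0)),
/// };
/// ```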
#[derive(Clone, Debug, Default)]
pub struct SparseImageMemoryBind {
    /// The aspects of the image where memory is to be (un)bound.
    ///
    /// The default value is `ImageAspects::empty()`, which must be overridden.
    pub aspects: ImageAspects,

    /// The mip level of the image where memory is to be (un)bound.
    ///
    /// The default value is `0`.
    pub mip_level: u32,

    /// The array layer of the image where memory is to be (un)bound.
    ///
    /// The default value is `0`.
    pub array_layer: u32,

    /// The offset in texels (or for compressed images, texel blocks) from the origin of the
    /// image, where memory is to be (un)bound.
    ///
    /// This must be a multiple of the
    /// [`SparseImageFormatProperties::image_granularity`](crate::image::SparseImageFormatProperties::image_granularity)
    /// value of the image.
    ///
    /// The default value is `[0; 3]`.
    pub offset: [u32; 3],

    /// The extent in texels (or for compressed images, texel blocks) of the image where
    /// memory is to be (un)bound.
    ///
    /// This must be a multiple of the
    /// [`SparseImageFormatProperties::image_granularity`](crate::image::SparseImageFormatProperties::image_granularity)
    /// value of the image, or `offset + extent` for that dimension must equal the image's total
    /// extent.
    ///
    /// The default value is `[0; 3]`, which must be overridden.
    pub extent: [u32; 3],

    /// If `Some`, specifies the memory and an offset into that memory that is to be bound.
    /// The provided memory must match the image's memory requirements.
    ///
    /// If `None`, specifies that existing memory at the specified location is to be unbound.
    ///
    /// The default value is `None`.
    pub memory: Option<(Arc<DeviceMemory>, DeviceSize)>,
}

/// Returns whether `offset` is a multiple of `alignment`. Relies on `DeviceAlignment` always
/// being a power of two, which makes the bit-mask test equivalent to `offset % alignment == 0`.
#[inline(always)]
pub(crate) fn is_aligned(offset: DeviceSize, alignment: DeviceAlignment) -> bool {
    offset & (alignment.as_devicesize() - 1) == 0
}

/// Performs bounds-checking of a Vulkan memory range. Analog of `std::slice::range`.
pub(crate) fn range(
    range: impl RangeBounds<DeviceSize>,
    bounds: RangeTo<DeviceSize>,
) -> Option<Range<DeviceSize>> {
    let len = bounds.end;

    let start = match range.start_bound() {
        Bound::Included(&start) => start,
        Bound::Excluded(start) => start.checked_add(1)?,
        Bound::Unbounded => 0,
    };

    let end = match range.end_bound() {
        Bound::Included(end) => end.checked_add(1)?,
        Bound::Excluded(&end) => end,
        Bound::Unbounded => len,
    };

    (start <= end && end <= len).then_some(Range { start, end })
}
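
// A small sanity check of `range`'s bounds handling, added as a sketch; the values are
// illustrative. Unlike `std::slice::range`, which panics, this version returns `None`.
#[cfg(test)]
mod tests {
    use super::range;

    #[test]
    fn range_bounds_checking() {
        // A fully specified range within bounds is returned as-is.
        assert_eq!(range(1..5, ..10), Some(1..5));
        // An unbounded end clamps to the length.
        assert_eq!(range(2.., ..10), Some(2..10));
        // An inclusive end is converted to an exclusive one.
        assert_eq!(range(1..=5, ..10), Some(1..6));
        // Out-of-bounds and reversed ranges are rejected.
        assert_eq!(range(5..15, ..10), None);
        assert_eq!(range(5..1, ..10), None);
    }
}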