xref: /aosp_15_r20/external/mesa3d/src/nouveau/vulkan/nvk_edb_bview_cache.h (revision 6104692788411f58d303aa86923a9ff6ecaded22)
1 /*
2  * Copyright © 2022 Collabora Ltd. and Red Hat Inc.
3  * SPDX-License-Identifier: MIT
4  */
5 #ifndef NVK_EDB_BVIEW_CACHE_H
6 #define NVK_EDB_BVIEW_CACHE_H 1
7 
8 #include "nvk_private.h"
9 
10 #include "nvk_descriptor_types.h"
11 
12 #include "util/format/u_formats.h"
13 
14 struct hash_table;
15 struct nvk_device;
16 
17 /** A cache of VK_EXT_descriptor_buffer BufferViews
18  *
19  * VK_EXT_descriptor_buffer effectively removes the concept of a VkBufferView
20  * object.  Instead of allocating a view object and passing that into
21  * vkGetDescriptorEXT() like you do for image views, typed buffers work more
22  * like untyped UBOs or SSBOs and you just pass a base address, size (in
23  * bytes) and format to vkGetDescriptorEXT().  On NVIDIA hardware, this is
24  * annoying because it means we no longer have an object to help us manage the
25  * life cycle of the descriptor on the heap.
26  *
27  * The solution is nvk_edb_bview_cache.  This cache stores enough typed buffer
28  * descriptors to cover the entire address space.  For each buffer format, we
29  * allocate 512 4 GiB buffer views, spaced at 2 GiB intervals.  This ensures
30  * that every client buffer view will live entirely inside one of these views.
31  * The descriptor we return from vkGetDescriptorEXT() contains the descriptor
32  * index to the HW descriptor as well as an offset and size (both in surface
33  * elements) and the alpha value to expect for OOB writes.
34  *
35  * For RGB32 formats, we place three 3 GiB buffer views every 1.5 GiB in the
36  * address space.  We need 3 per chunk because RGB32 buffer views only have a
37  * minimum alignment of 4B but the offsetting we do in the shader is in terms
38  * of surface elements.  For offsetting by 1 or 2 components, we need a
39  * different view.  The reason why it's 3 GiB instead of 4 GiB is because
40  * Ampere reduced the maximum size of an RGB32 buffer view to 3 GiB.
41  *
42  * In nvk_nir_lower_descriptors(), we lower all texture or image buffer access
43  * to an access through one of these HW descriptors.  Bounds checking is done
44  * in software and the offset is applied to ensure that we only ever read from
45  * the memory range specified by the client.  The HW descriptor only exists to
46  * help with format conversion.
47  */
48 struct nvk_edb_bview_cache {
   /* Table of cached HW buffer view descriptors covering the address
    * space, per the scheme described in the comment above.  NULL in a
    * zeroed nvk_edb_bview_cache (which nvk_edb_bview_cache_finish()
    * explicitly accepts). */
49    struct hash_table *cache;
50 };
51 
/* Initializes the buffer view cache for the given device.
 *
 * Returns a VkResult indicating whether initialization succeeded.
 * NOTE(review): presumably must succeed before
 * nvk_edb_bview_cache_get_descriptor() may be called — confirm against the
 * implementation.
 */
52 VkResult
53 nvk_edb_bview_cache_init(struct nvk_device *dev,
54                          struct nvk_edb_bview_cache *cache);
55 
56 /* It's safe to call this function on a zeroed nvk_edb_bview_cache */
/* Tears down the cache; pairs with nvk_edb_bview_cache_init().
 * NOTE(review): assumed to release the HW descriptors tracked in the
 * hash table — confirm against the implementation. */
57 void
58 nvk_edb_bview_cache_finish(struct nvk_device *dev,
59                            struct nvk_edb_bview_cache *cache);
60 
/* Returns a descriptor for a typed buffer view of the client's range
 * [base_addr, base_addr + size_B) with the given format.
 *
 * Per the scheme described at the top of this file, the returned
 * descriptor contains the index of one of the cached wide HW views
 * together with the offset and size (in surface elements) of the
 * client's range within that view, and the alpha value to expect for
 * OOB writes. */
61 struct nvk_edb_buffer_view_descriptor
62 nvk_edb_bview_cache_get_descriptor(struct nvk_device *dev,
63                                    struct nvk_edb_bview_cache *cache,
64                                    uint64_t base_addr, uint64_t size_B,
65                                    enum pipe_format format);
66 
67 #endif /* NVK_EDB_BVIEW_CACHE_H */
68